Merge branch 'akpm' (incoming from Andrew)

Merge third batch of fixes from Andrew Morton:
 "Most of the rest.  I still have two large patchsets against AIO and
  IPC, but they're a bit stuck behind other trees and I'm about to
  vanish for six days.

   - random fixlets
   - inotify
   - more of the MM queue
   - show_stack() cleanups
   - DMI update
   - kthread/workqueue things
   - compat cleanups
   - epoll updates
   - binfmt updates
   - nilfs2
   - hfs
   - hfsplus
   - ptrace
   - kmod
   - coredump
   - kexec
   - rbtree
   - pids
   - pidns
   - pps
   - semaphore tweaks
   - some w1 patches
   - relay updates
   - core Kconfig changes
   - sysrq tweaks"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
  Documentation/sysrq: fix inconsistent help message of sysrq key
  ethernet/emac/sysrq: fix inconsistent help message of sysrq key
  sparc/sysrq: fix inconsistent help message of sysrq key
  powerpc/xmon/sysrq: fix inconsistent help message of sysrq key
  ARM/etm/sysrq: fix inconsistent help message of sysrq key
  power/sysrq: fix inconsistent help message of sysrq key
  kgdb/sysrq: fix inconsistent help message of sysrq key
  lib/decompress.c: fix initconst
  notifier-error-inject: fix module names in Kconfig
  kernel/sys.c: make prctl(PR_SET_MM) generally available
  UAPI: remove empty Kbuild files
  menuconfig: print more info for symbol without prompts
  init/Kconfig: re-order CONFIG_EXPERT options to fix menuconfig display
  kconfig menu: move Virtualization drivers near other virtualization options
  Kconfig: consolidate CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
  relay: use macro PAGE_ALIGN instead of FIX_SIZE
  kernel/relay.c: move FIX_SIZE macro into relay.c
  kernel/relay.c: remove unused function argument actor
  drivers/w1/slaves/w1_ds2760.c: fix the error handling in w1_ds2760_add_slave()
  drivers/w1/slaves/w1_ds2781.c: fix the error handling in w1_ds2781_add_slave()
  ...
diff --git a/Documentation/ABI/testing/sysfs-devices-lpss_ltr b/Documentation/ABI/testing/sysfs-devices-lpss_ltr
new file mode 100644
index 0000000..ea9298d
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-lpss_ltr
@@ -0,0 +1,44 @@
+What:		/sys/devices/.../lpss_ltr/
+Date:		March 2013
+Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Description:
+		The /sys/devices/.../lpss_ltr/ directory is only present for
+		devices included in the Intel Lynxpoint Low Power Subsystem
+		(LPSS).  If present, it contains attributes containing the LTR
+		mode and the values of LTR registers of the device.
+
+What:		/sys/devices/.../lpss_ltr/ltr_mode
+Date:		March 2013
+Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Description:
+		The /sys/devices/.../lpss_ltr/ltr_mode attribute contains an
+		integer number (0 or 1) indicating whether or not the device's
+		LTR functionality is working in the software mode (1).
+
+		This attribute is read-only.  If the device's runtime PM status
+		is not "active", attempts to read from this attribute cause
+		-EAGAIN to be returned.
+
+What:		/sys/devices/.../lpss_ltr/auto_ltr
+Date:		March 2013
+Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Description:
+		The /sys/devices/.../lpss_ltr/auto_ltr attribute contains the
+		current value of the device's AUTO_LTR register (raw)
+		represented as an 8-digit hexadecimal number.
+
+		This attribute is read-only.  If the device's runtime PM status
+		is not "active", attempts to read from this attribute cause
+		-EAGAIN to be returned.
+
+What:		/sys/devices/.../lpss_ltr/sw_ltr
+Date:		March 2013
+Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Description:
+		The /sys/devices/.../lpss_ltr/sw_ltr attribute contains the
+		current value of the device's SW_LTR register (raw) represented
+		as an 8-digit hexadecimal number.
+
+		This attribute is read-only.  If the device's runtime PM status
+		is not "active", attempts to read from this attribute cause
+		-EAGAIN to be returned.
diff --git a/Documentation/ABI/testing/sysfs-devices-power_resources_wakeup b/Documentation/ABI/testing/sysfs-devices-power_resources_wakeup
new file mode 100644
index 0000000..e0588fe
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-power_resources_wakeup
@@ -0,0 +1,13 @@
+What:		/sys/devices/.../power_resources_wakeup/
+Date:		April 2013
+Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Description:
+		The /sys/devices/.../power_resources_wakeup/ directory is only
+		present for device objects representing ACPI device nodes that
+		require ACPI power resources for wakeup signaling.
+
+		If present, it contains symbolic links to device directories
+		representing ACPI power resources that need to be turned on for
+		the given device node to be able to signal wakeup.  The names of
+		the links are the same as the names of the directories they
+		point to.
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi
index dd930c8..ce9bee9 100644
--- a/Documentation/ABI/testing/sysfs-firmware-acpi
+++ b/Documentation/ABI/testing/sysfs-firmware-acpi
@@ -18,6 +18,32 @@
 		yoffset: The number of pixels between the top of the screen
 			 and the top edge of the image.
 
+What:		/sys/firmware/acpi/hotplug/
+Date:		February 2013
+Contact:	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Description:
+		There are separate hotplug profiles for different classes of
+		devices supported by ACPI, such as containers, memory modules,
+		processors, PCI root bridges etc.  A hotplug profile for a given
+		class of devices is a collection of settings defining the way
+		that class of devices will be handled by the ACPI core hotplug
+		code.  Those profiles are represented in sysfs as subdirectories
+		of /sys/firmware/acpi/hotplug/.
+
+		The following setting is available to user space for each
+		hotplug profile:
+
+		enabled: If set, the ACPI core will handle notifications of
+			hotplug events associated with the given class of
+			devices and will allow those devices to be ejected with
+			the help of the _EJ0 control method.  Unsetting it
+			effectively disables hotplug for the corresponding
+			class of devices.
+
+		The value of the above attribute is an integer number: 1 (set)
+		or 0 (unset).  Attempts to write any other values to it will
+		cause -EINVAL to be returned.
+
 What:		/sys/firmware/acpi/interrupts/
 Date:		February 2008
 Contact:	Len Brown <lenb@kernel.org>
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 72f70b1..a3585ea 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -108,8 +108,9 @@
 				cpufreq_driver.target is called with
 				these values.
 
-For setting some of these values, the frequency table helpers might be
-helpful. See the section 2 for more information on them.
+For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the
+frequency table helpers might be helpful. See section 2 for more information
+on them.
 
 SMP systems normally have same clock source for a group of cpus. For these the
 .init() would be called only once for the first online cpu. Here the .init()
@@ -184,10 +185,10 @@
 As most cpufreq processors only allow for being set to a few specific
 frequencies, a "frequency table" with some functions might assist in
 some work of the processor driver. Such a "frequency table" consists
-of an array of struct cpufreq_freq_table entries, with any value in
+of an array of struct cpufreq_frequency_table entries, with any value in
 "index" you want to use, and the corresponding frequency in
 "frequency". At the end of the table, you need to add a
-cpufreq_freq_table entry with frequency set to CPUFREQ_TABLE_END. And
+cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END. And
 if you want to skip one entry in the table, set the frequency to 
 CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending
 order.
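
To make the table layout described above concrete, a driver-defined table
might look like the sketch below (the frequencies and index values are
invented for illustration; section 2 describes helpers such as
cpufreq_frequency_table_cpuinfo() that consume such a table):

	#include <linux/cpufreq.h>

	/* Hypothetical table: three usable frequencies plus one skipped slot. */
	static struct cpufreq_frequency_table my_freq_table[] = {
		{ .index = 0, .frequency = 792000 },	/* kHz */
		{ .index = 1, .frequency = 396000 },
		{ .index = 2, .frequency = CPUFREQ_ENTRY_INVALID },	/* skipped */
		{ .index = 3, .frequency = 198000 },
		{ .index = 4, .frequency = CPUFREQ_TABLE_END },		/* terminator */
	};
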
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index c7a2eb8..66f9cc3 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -167,6 +167,27 @@
 busy, rather than shifting back and forth in speed. This tunable has no
 effect on behavior at lower speeds/lower CPU loads.
 
+powersave_bias: this parameter takes a value between 0 and 1000. It
+defines the percentage (times 10) value of the target frequency that
+will be shaved off of the target. For example, when set to 100 -- 10%,
+when ondemand governor would have targeted 1000 MHz, it will target
+1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0
+(disabled) by default.
+When AMD frequency sensitivity powersave bias driver --
+drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter
+defines the workload frequency sensitivity threshold in which a lower
+frequency is chosen instead of ondemand governor's original target.
+The frequency sensitivity is a hardware reported (on AMD Family 16h
+Processors and above) value between 0 and 100% that tells software how
+the performance of the workload running on a CPU will change when
+frequency changes. A workload with sensitivity of 0% (memory/IO-bound)
+will not perform any better at a higher core frequency, whereas a
+workload with sensitivity of 100% (CPU-bound) will perform better the
+higher the frequency. When the driver is loaded, this is set to 400
+by default -- for CPUs running workloads with sensitivity value below
+40%, a lower frequency is chosen. Unloading the driver or writing 0
+will disable this feature.
+
 
 2.5 Conservative
 ----------------
@@ -191,6 +212,12 @@
 default value of '20' it means that if the CPU usage needs to be below
 20% between samples to have the frequency decreased.
 
+sampling_down_factor: similar functionality as in "ondemand" governor.
+But in "conservative", it controls the rate at which the kernel makes
+a decision on when to decrease the frequency while running at any
+speed. Load for frequency increase is still evaluated every
+sampling rate.
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/cpuidle/driver.txt b/Documentation/cpuidle/driver.txt
index 7a9e09e..1b0d81d 100644
--- a/Documentation/cpuidle/driver.txt
+++ b/Documentation/cpuidle/driver.txt
@@ -15,11 +15,17 @@
 cpuidle driver initializes the cpuidle_device structure for each CPU device
 and registers with cpuidle using cpuidle_register_device.
 
+If all the CPUs share the same set of idle states, the wrapper function
+cpuidle_register could be used instead.
+
 It can also support the dynamic changes (like battery <-> AC), by using
 cpuidle_pause_and_lock, cpuidle_disable_device and cpuidle_enable_device,
 cpuidle_resume_and_unlock.
 
 Interfaces:
+extern int cpuidle_register(struct cpuidle_driver *drv,
+                            const struct cpumask *const coupled_cpus);
+extern int cpuidle_unregister(struct cpuidle_driver *drv);
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
 extern int cpuidle_register_device(struct cpuidle_device *dev);
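
As a rough sketch of how the cpuidle_register() wrapper above can be used
(this mirrors the at91 and davinci conversions elsewhere in this merge; the
driver name and the single WFI state are made up for illustration):

	#include <linux/cpuidle.h>
	#include <linux/init.h>
	#include <linux/module.h>
	#include <asm/cpuidle.h>	/* ARM_CPUIDLE_WFI_STATE (ARM only) */

	static struct cpuidle_driver my_idle_driver = {
		.name		= "my_idle",
		.owner		= THIS_MODULE,
		.states[0]	= ARM_CPUIDLE_WFI_STATE,
		.state_count	= 1,
	};

	static int __init my_cpuidle_init(void)
	{
		/* NULL: no coupled CPUs; a device is registered for each CPU. */
		return cpuidle_register(&my_idle_driver, NULL);
	}
	device_initcall(my_cpuidle_init);
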
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index b428556..e919228 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -1,10 +1,13 @@
 dm-raid
--------
+=======
 
 The device-mapper RAID (dm-raid) target provides a bridge from DM to MD.
 It allows the MD RAID drivers to be accessed using a device-mapper
 interface.
 
+
+Mapping Table Interface
+-----------------------
 The target is named "raid" and it accepts the following parameters:
 
   <raid_type> <#raid_params> <raid_params> \
@@ -47,7 +50,7 @@
     followed by optional parameters (in any order):
 	[sync|nosync]   Force or prevent RAID initialization.
 
-	[rebuild <idx>]	Rebuild drive number idx (first drive is 0).
+	[rebuild <idx>]	Rebuild drive number 'idx' (first drive is 0).
 
 	[daemon_sleep <ms>]
 		Interval between runs of the bitmap daemon that
@@ -56,9 +59,9 @@
 
 	[min_recovery_rate <kB/sec/disk>]  Throttle RAID initialization
 	[max_recovery_rate <kB/sec/disk>]  Throttle RAID initialization
-	[write_mostly <idx>]		   Drive index is write-mostly
-	[max_write_behind <sectors>]       See '-write-behind=' (man mdadm)
-	[stripe_cache <sectors>]           Stripe cache size (higher RAIDs only)
+	[write_mostly <idx>]		   Mark drive index 'idx' write-mostly.
+	[max_write_behind <sectors>]       See '--write-behind=' (man mdadm)
+	[stripe_cache <sectors>]           Stripe cache size (RAID 4/5/6 only)
 	[region_size <sectors>]
 		The region_size multiplied by the number of regions is the
 		logical size of the array.  The bitmap records the device
@@ -122,7 +125,7 @@
 	given for both the metadata and data drives for a given position.
 
 
-Example tables
+Example Tables
 --------------
 # RAID4 - 4 data drives, 1 parity (no metadata devices)
 # No metadata devices specified to hold superblock/bitmap info
@@ -141,26 +144,70 @@
         raid4 4 2048 sync min_recovery_rate 20 \
         5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
 
+
+Status Output
+-------------
 'dmsetup table' displays the table used to construct the mapping.
 The optional parameters are always printed in the order listed
 above with "sync" or "nosync" always output ahead of the other
 arguments, regardless of the order used when originally loading the table.
 Arguments that can be repeated are ordered by value.
 
-'dmsetup status' yields information on the state and health of the
-array.
-The output is as follows:
+
+'dmsetup status' yields information on the state and health of the array.
+The output is as follows (normally a single line, but expanded here for
+clarity):
 1: <s> <l> raid \
-2:      <raid_type> <#devices> <1 health char for each dev> <resync_ratio>
+2:      <raid_type> <#devices> <health_chars> \
+3:      <sync_ratio> <sync_action> <mismatch_cnt>
 
 Line 1 is the standard output produced by device-mapper.
-Line 2 is produced by the raid target, and best explained by example:
-        0 1960893648 raid raid4 5 AAAAA 2/490221568
+Lines 2 & 3 are produced by the raid target and are best explained by example:
+        0 1960893648 raid raid4 5 AAAAA 2/490221568 init 0
 Here we can see the RAID type is raid4, there are 5 devices - all of
-which are 'A'live, and the array is 2/490221568 complete with recovery.
-Faulty or missing devices are marked 'D'.  Devices that are out-of-sync
-are marked 'a'.
+which are 'A'live, and the array is 2/490221568 complete with its initial
+recovery.  Here is a fuller description of the individual fields:
+	<raid_type>     Same as the <raid_type> used to create the array.
+	<health_chars>  One char for each device, indicating: 'A' = alive and
+			in-sync, 'a' = alive but not in-sync, 'D' = dead/failed.
+	<sync_ratio>    The ratio indicating how much of the array has undergone
+			the process described by 'sync_action'.  If the
+			'sync_action' is "check" or "repair", then the process
+			of "resync" or "recover" can be considered complete.
+	<sync_action>   One of the following possible states:
+			idle    - No synchronization action is being performed.
+			frozen  - The current action has been halted.
+			resync  - Array is undergoing its initial synchronization
+				  or is resynchronizing after an unclean shutdown
+				  (possibly aided by a bitmap).
+			recover - A device in the array is being rebuilt or
+				  replaced.
+			check   - A user-initiated full check of the array is
+				  being performed.  All blocks are read and
+				  checked for consistency.  The number of
+				  discrepancies found is recorded in
+				  <mismatch_cnt>.  No changes are made to the
+				  array by this action.
+			repair  - The same as "check", but discrepancies are
+				  corrected.
+			reshape - The array is undergoing a reshape.
+	<mismatch_cnt>  The number of discrepancies found between mirror copies
+			in RAID1/10 or wrong parity values found in RAID4/5/6.
+			This value is valid only after a "check" of the array
+			is performed.  A healthy array has a 'mismatch_cnt' of 0.
 
+Message Interface
+-----------------
+The dm-raid target will accept certain actions through the 'message' interface.
+('man dmsetup' for more information on the message interface.)  These actions
+include:
+	"idle"   - Halt the current sync action.
+	"frozen" - Freeze the current sync action.
+	"resync" - Initiate/continue a resync.
+	"recover"- Initiate/continue a recover process.
+	"check"  - Initiate a check (i.e. a "scrub") of the array.
+	"repair" - Initiate a repair of the array.
+	"reshape"- Currently unsupported (-EINVAL).
 
 Version History
 ---------------
@@ -171,4 +218,7 @@
 1.3.1	Allow device replacement/rebuild for RAID 10
 1.3.2   Fix/improve redundancy checking for RAID10
 1.4.0	Non-functional change.  Removes arg from mapping function.
-1.4.1   Add RAID10 "far" and "offset" algorithm support.
+1.4.1   RAID10 fix redundancy validation checks (commit 55ebbb5).
+1.4.2   Add RAID10 "far" and "offset" algorithm support.
+1.5.0   Add message interface to allow manipulation of the sync_action.
+	New status (STATUSTYPE_INFO) fields: sync_action and mismatch_cnt.
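
As an illustrative use of the message interface documented above (the
device name "my_raid" is made up), a scrub of the array could be requested
with "dmsetup message my_raid 0 check", and the resulting sync_action and
mismatch_cnt status fields then read back with "dmsetup status my_raid";
see 'man dmsetup' for the authoritative syntax.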
diff --git a/Documentation/devicetree/bindings/ata/imx-pata.txt b/Documentation/devicetree/bindings/ata/imx-pata.txt
new file mode 100644
index 0000000..e38d734
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/imx-pata.txt
@@ -0,0 +1,17 @@
+* Freescale i.MX PATA Controller
+
+Required properties:
+- compatible: "fsl,imx27-pata"
+- reg: Address range of the PATA Controller
+- interrupts: The interrupt of the PATA Controller
+- clocks: the clocks for the PATA Controller
+
+Example:
+
+	pata: pata@83fe0000 {
+		compatible = "fsl,imx51-pata", "fsl,imx27-pata";
+		reg = <0x83fe0000 0x4000>;
+		interrupts = <70>;
+		clocks = <&clks 161>;
+		status = "disabled";
+	};
diff --git a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
new file mode 100644
index 0000000..0715695
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
@@ -0,0 +1,65 @@
+Generic ARM big LITTLE cpufreq driver's DT glue
+-----------------------------------------------
+
+This is the DT-specific glue layer for the generic cpufreq driver for
+big LITTLE systems.
+
+Both required and optional properties listed below must be defined
+under node /cpus/cpu@x, where x is the first cpu inside a cluster.
+
+FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
+must be present contiguously. Generic DT driver will check only node 'x' for
+cpu:x.
+
+Required properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
+  for details
+
+Optional properties:
+- clock-latency: Specify the possible maximum transition latency for clock,
+  in unit of nanoseconds.
+
+Examples:
+
+cpus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	cpu@0 {
+		compatible = "arm,cortex-a15";
+		reg = <0>;
+		next-level-cache = <&L2>;
+		operating-points = <
+			/* kHz    uV */
+			792000  1100000
+			396000  950000
+			198000  850000
+		>;
+		clock-latency = <61036>; /* two CLK32 periods */
+	};
+
+	cpu@1 {
+		compatible = "arm,cortex-a15";
+		reg = <1>;
+		next-level-cache = <&L2>;
+	};
+
+	cpu@100 {
+		compatible = "arm,cortex-a7";
+		reg = <100>;
+		next-level-cache = <&L2>;
+		operating-points = <
+			/* kHz    uV */
+			792000  950000
+			396000  750000
+			198000  450000
+		>;
+		clock-latency = <61036>; /* two CLK32 periods */
+	};
+
+	cpu@101 {
+		compatible = "arm,cortex-a7";
+		reg = <101>;
+		next-level-cache = <&L2>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
index 4416ccc..051f764 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -32,7 +32,7 @@
 			396000  950000
 			198000  850000
 		>;
-		transition-latency = <61036>; /* two CLK32 periods */
+		clock-latency = <61036>; /* two CLK32 periods */
 	};
 
 	cpu@1 {
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-exynos5440.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-exynos5440.txt
new file mode 100644
index 0000000..caff1a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-exynos5440.txt
@@ -0,0 +1,28 @@
+
+Exynos5440 cpufreq driver
+-------------------------
+
+Exynos5440 SoC cpufreq driver for CPU frequency scaling.
+
+Required properties:
+- interrupts: Interrupt that signals completion of a cpu frequency change.
+- operating-points: Table of frequencies and voltages the CPU can be transitioned to,
+	in decreasing order. Frequency should be in kHz units and voltage
+	should be in microvolts.
+
+Optional properties:
+- clock-latency: Clock monitor latency in microseconds.
+
+All the required properties listed above must be defined under the cpufreq node.
+
+Example:
+--------
+	cpufreq@160000 {
+		compatible = "samsung,exynos5440-cpufreq";
+		reg = <0x160000 0x1000>;
+		interrupts = <0 57 0>;
+		operating-points = <
+				1000000 975000
+				800000  925000>;
+		clock-latency = <100000>;
+	};
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 446859f..ad6a738 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -35,6 +35,8 @@
 fsl,mma8450		MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer
 fsl,mpr121		MPR121: Proximity Capacitive Touch Sensor Controller
 fsl,sgtl5000		SGTL5000: Ultra Low-Power Audio Codec
+infineon,slb9635tt	Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100kHz)
+infineon,slb9645tt	Infineon SLB9645 I2C TPM (new protocol, max 400kHz)
 maxim,ds1050		5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237		Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625		9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
diff --git a/Documentation/devicetree/bindings/power_supply/power_supply.txt b/Documentation/devicetree/bindings/power_supply/power_supply.txt
new file mode 100644
index 0000000..8391bfa
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/power_supply.txt
@@ -0,0 +1,23 @@
+Power Supply Core Support
+
+Optional Properties:
+ - power-supplies : This property is added to a supply in order to list the
+   devices which supply it power, referenced by their phandles.
+
+Example:
+
+	usb-charger: power@e {
+		compatible = "some,usb-charger";
+		...
+	};
+
+	ac-charger: power@c {
+		compatible = "some,ac-charger";
+		...
+	};
+
+	battery@b {
+		compatible = "some,battery";
+		...
+		power-supplies = <&usb-charger>, <&ac-charger>;
+	};
diff --git a/Documentation/devicetree/bindings/power_supply/tps65090.txt b/Documentation/devicetree/bindings/power_supply/tps65090.txt
new file mode 100644
index 0000000..8e5e0d3
--- /dev/null
+++ b/Documentation/devicetree/bindings/power_supply/tps65090.txt
@@ -0,0 +1,17 @@
+TPS65090 Frontend PMU with Switchmode Charger
+
+Required Properties:
+-compatible: "ti,tps65090-charger"
+
+Optional Properties:
+-ti,enable-low-current-chrg: Enables charging when a low current is detected
+ while the default logic is to stop charging.
+
+This node is a subnode of the tps65090 PMIC.
+
+Example:
+
+	tps65090-charger {
+		compatible = "ti,tps65090-charger";
+		ti,enable-low-current-chrg;
+	};
diff --git a/Documentation/hwmon/ab8500 b/Documentation/hwmon/ab8500
new file mode 100644
index 0000000..cf169c8
--- /dev/null
+++ b/Documentation/hwmon/ab8500
@@ -0,0 +1,22 @@
+Kernel driver ab8500
+====================
+
+Supported chips:
+  * ST-Ericsson AB8500
+    Prefix: 'ab8500'
+    Addresses scanned: -
+    Datasheet: http://www.stericsson.com/developers/documentation.jsp
+
+Authors:
+        Martin Persson <martin.persson@stericsson.com>
+        Hongbo Zhang <hongbo.zhang@linaro.org>
+
+Description
+-----------
+
+See also Documentation/hwmon/abx500. This is the ST-Ericsson AB8500 specific
+driver.
+
+Currently only the AB8500 internal sensor and one external sensor for battery
+temperature are monitored. Other GPADC channels can also be monitored if needed
+in the future.
diff --git a/Documentation/hwmon/abx500 b/Documentation/hwmon/abx500
new file mode 100644
index 0000000..319a058
--- /dev/null
+++ b/Documentation/hwmon/abx500
@@ -0,0 +1,28 @@
+Kernel driver abx500
+====================
+
+Supported chips:
+  * ST-Ericsson ABx500 series
+    Prefix: 'abx500'
+    Addresses scanned: -
+    Datasheet: http://www.stericsson.com/developers/documentation.jsp
+
+Authors:
+        Martin Persson <martin.persson@stericsson.com>
+        Hongbo Zhang <hongbo.zhang@linaro.org>
+
+Description
+-----------
+
+Every ST-Ericsson Ux500 SoC physically consists of both an ABx500 and a
+DBx500; this is the kernel hwmon driver for the ABx500.
+
+There are some GPADCs inside the ABx500 which are designed for connecting to
+thermal sensors, and there is also a thermal sensor inside the ABx500 itself,
+which raises an interrupt when a critical temperature is reached.
+
+This abx500 driver is a common layer which can monitor all of these sensors;
+each specific abx500 chip keeps its own configuration in its own file. For
+example, sensors that are not available on a given chip can be configured as
+invisible by setting the corresponding gpadc_addr to 0, so that the sensor
+won't be polled.
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 993fba3..e0ddd32 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -119,7 +119,7 @@
 The array is started with the RUN_ARRAY ioctl.
 
 Once started, new devices can be added.  They should have an
-appropriate superblock written to them, and then passed be in with
+appropriate superblock written to them, and then be passed in with
 ADD_NEW_DISK.
 
 Devices that have failed or are not yet active can be detached from an
@@ -131,7 +131,7 @@
 -------------------------------------------------------------
 
 An array can be 'created' by describing the array (level, chunksize
-etc) in a SET_ARRAY_INFO ioctl.  This must has major_version==0 and
+etc) in a SET_ARRAY_INFO ioctl.  This must have major_version==0 and
 raid_disks != 0.
 
 Then uninitialized devices can be added with ADD_NEW_DISK.  The
@@ -426,7 +426,7 @@
       offset
         This gives the location in the device (in sectors from the
         start) where data from the array will be stored.  Any part of
-        the device before this offset us not touched, unless it is
+        the device before this offset is not touched, unless it is
         used for storing metadata (Formats 1.1 and 1.2).
 
       size
@@ -440,7 +440,7 @@
         When the device is not 'in_sync', this records the number of
 	sectors from the start of the device which are known to be
 	correct.  This is normally zero, but during a recovery
-	operation is will steadily increase, and if the recovery is
+	operation it will steadily increase, and if the recovery is
 	interrupted, restoring this value can cause recovery to
 	avoid repeating the earlier blocks.  With v1.x metadata, this
 	value is saved and restored automatically.
@@ -468,7 +468,7 @@
 
 
 
-An active md device will also contain and entry for each active device
+An active md device will also contain an entry for each active device
 in the array.  These are named
 
     rdNN
@@ -482,7 +482,7 @@
 
 
 
-Active md devices for levels that support data redundancy (1,4,5,6)
+Active md devices for levels that support data redundancy (1,4,5,6,10)
 also have
 
    sync_action
@@ -494,7 +494,7 @@
                        failed/missing device
        idle          - nothing is happening
        check         - A full check of redundancy was requested and is
-                       happening.  This reads all block and checks
+                       happening.  This reads all blocks and checks
                        them. A repair may also happen for some raid
                        levels.
        repair        - A full check and repair is happening.  This is
@@ -522,7 +522,7 @@
 
    degraded
       This contains a count of the number of devices by which the
-      arrays is degraded.  So an optimal array with show '0'.  A
+      array is degraded.  So an optimal array will show '0'.  A
       single failed/missing drive will show '1', etc.
       This file responds to select/poll, any increase or decrease
       in the count of missing devices will trigger an event.
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 6e95356..3af5ae6 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -17,6 +17,8 @@
 	%pF	versatile_init+0x0/0x110
 	%pf	versatile_init
 	%pS	versatile_init+0x0/0x110
+	%pSR	versatile_init+0x9/0x110
+		(with __builtin_extract_return_addr() translation)
 	%ps	versatile_init
 	%pB	prev_fn_of_versatile_init+0x88/0x88
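
As a quick illustration of the new %pSR specifier above, a made-up debug
line like the following would print its caller's symbol after the
__builtin_extract_return_addr() translation:

	printk(KERN_DEBUG "called from %pSR\n",
	       __builtin_return_address(0));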
 
diff --git a/Documentation/security/Smack.txt b/Documentation/security/Smack.txt
index 8a177e4..7a2d30c 100644
--- a/Documentation/security/Smack.txt
+++ b/Documentation/security/Smack.txt
@@ -117,6 +117,17 @@
 ambient
 	This contains the Smack label applied to unlabeled network
 	packets.
+change-rule
+	This interface allows modification of existing access control rules.
+	The format accepted on write is:
+		"%s %s %s %s"
+	where the first string is the subject label, the second the
+	object label, the third the access to allow and the fourth the
+	access to deny. The access strings may contain only the characters
+	"rwxat-". If a rule for a given subject and object exists it will be
+	modified by enabling the permissions in the third string and disabling
+	those in the fourth string. If there is no such rule it will be
+	created using the access specified in the third and the fourth strings.
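
To make the change-rule format above concrete: with hypothetical labels,
writing "Secret Docs rx w" to change-rule would grant the subject label
"Secret" read and execute access to objects labeled "Docs" and revoke any
write access, while leaving the remaining access bits of an existing rule
untouched.
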
 cipso
 	This interface allows a specific CIPSO header to be assigned
 	to a Smack label. The format accepted on write is:
diff --git a/MAINTAINERS b/MAINTAINERS
index 9cc4bd2a..75f6282 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2203,12 +2203,34 @@
 
 CPU FREQUENCY DRIVERS
 M:	Rafael J. Wysocki <rjw@sisk.pl>
+M:	Viresh Kumar <viresh.kumar@linaro.org>
 L:	cpufreq@vger.kernel.org
 L:	linux-pm@vger.kernel.org
 S:	Maintained
+T:	git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F:	drivers/cpufreq/
 F:	include/linux/cpufreq.h
 
+CPU FREQUENCY DRIVERS - ARM BIG LITTLE
+M:	Viresh Kumar <viresh.kumar@linaro.org>
+M:	Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+L:	cpufreq@vger.kernel.org
+L:	linux-pm@vger.kernel.org
+W:	http://www.arm.com/products/processors/technologies/biglittleprocessing.php
+S:	Maintained
+F:	drivers/cpufreq/arm_big_little.h
+F:	drivers/cpufreq/arm_big_little.c
+F:	drivers/cpufreq/arm_big_little_dt.c
+
+CPUIDLE DRIVERS
+M:	Rafael J. Wysocki <rjw@sisk.pl>
+M:	Daniel Lezcano <daniel.lezcano@linaro.org>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+T:	git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+F:	drivers/cpuidle/*
+F:	include/linux/cpuidle.h
+
 CPUID/MSR DRIVER
 M:	"H. Peter Anvin" <hpa@zytor.com>
 S:	Maintained
@@ -6236,7 +6258,7 @@
 F:	drivers/scsi/pmcraid.*
 
 PMC SIERRA PM8001 DRIVER
-M:	jack_wang@usish.com
+M:	xjtuwjp@gmail.com
 M:	lindar_liu@usish.com
 L:	linux-scsi@vger.kernel.org
 S:	Supported
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a39e321..006f983 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2163,7 +2163,6 @@
 menu "CPU Power Management"
 
 if ARCH_HAS_CPUFREQ
-
 source "drivers/cpufreq/Kconfig"
 
 config CPU_FREQ_IMX
@@ -2173,30 +2172,6 @@
 	help
 	  This enables the CPUfreq driver for i.MX CPUs.
 
-config CPU_FREQ_SA1100
-	bool
-
-config CPU_FREQ_SA1110
-	bool
-
-config CPU_FREQ_INTEGRATOR
-	tristate "CPUfreq driver for ARM Integrator CPUs"
-	depends on ARCH_INTEGRATOR && CPU_FREQ
-	default y
-	help
-	  This enables the CPUfreq driver for ARM Integrator CPUs.
-
-	  For details, take a look at <file:Documentation/cpu-freq>.
-
-	  If in doubt, say Y.
-
-config CPU_FREQ_PXA
-	bool
-	depends on CPU_FREQ && ARCH_PXA && PXA25x
-	default y
-	select CPU_FREQ_DEFAULT_GOV_USERSPACE
-	select CPU_FREQ_TABLE
-
 config CPU_FREQ_S3C
 	bool
 	help
diff --git a/arch/arm/configs/kirkwood_defconfig b/arch/arm/configs/kirkwood_defconfig
index 13482ea..93f3794 100644
--- a/arch/arm/configs/kirkwood_defconfig
+++ b/arch/arm/configs/kirkwood_defconfig
@@ -56,7 +56,6 @@
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_KIRKWOOD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index 0c63815..48f1228 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -27,8 +27,6 @@
 
 #define AT91_MAX_STATES	2
 
-static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
-
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
@@ -47,7 +45,6 @@
 static struct cpuidle_driver at91_idle_driver = {
 	.name			= "at91_idle",
 	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
 	.states[0]		= ARM_CPUIDLE_WFI_STATE,
 	.states[1]		= {
 		.enter			= at91_enter_idle,
@@ -61,20 +58,9 @@
 };
 
 /* Initialize CPU idle by registering the idle states */
-static int at91_init_cpuidle(void)
+static int __init at91_init_cpuidle(void)
 {
-	struct cpuidle_device *device;
-
-	device = &per_cpu(at91_cpuidle_device, smp_processor_id());
-	device->state_count = AT91_MAX_STATES;
-
-	cpuidle_register_driver(&at91_idle_driver);
-
-	if (cpuidle_register_device(device)) {
-		printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
-		return -EIO;
-	}
-	return 0;
+	return cpuidle_register(&at91_idle_driver, NULL);
 }
 
 device_initcall(at91_init_cpuidle);
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index fb5c1aa..dd1ffcc 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -37,7 +37,6 @@
 obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD)	+= board-omapl138-hawk.o
 
 # Power Management
-obj-$(CONFIG_CPU_FREQ)			+= cpufreq.o
 obj-$(CONFIG_CPU_IDLE)			+= cpuidle.o
 obj-$(CONFIG_SUSPEND)			+= pm.o sleep.o
 obj-$(CONFIG_HAVE_CLK)			+= pm_domain.o
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 5ac9e93..36aef3a 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -25,7 +25,6 @@
 
 #define DAVINCI_CPUIDLE_MAX_STATES	2
 
-static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
 static void __iomem *ddr2_reg_base;
 static bool ddr2_pdown;
 
@@ -50,14 +49,10 @@
 
 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv,
-						int index)
+			      struct cpuidle_driver *drv, int index)
 {
 	davinci_save_ddr_power(1, ddr2_pdown);
-
-	index = cpuidle_wrap_enter(dev,	drv, index,
-				arm_cpuidle_simple_enter);
-
+	cpu_do_idle();
 	davinci_save_ddr_power(0, ddr2_pdown);
 
 	return index;
@@ -66,7 +61,6 @@
 static struct cpuidle_driver davinci_idle_driver = {
 	.name			= "cpuidle-davinci",
 	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
 	.states[0]		= ARM_CPUIDLE_WFI_STATE,
 	.states[1]		= {
 		.enter			= davinci_enter_idle,
@@ -81,12 +75,8 @@
 
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
-	int ret;
-	struct cpuidle_device *device;
 	struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
 
-	device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
-
 	if (!pdata) {
 		dev_err(&pdev->dev, "cannot get platform data\n");
 		return -ENOENT;
@@ -96,20 +86,7 @@
 
 	ddr2_pdown = pdata->ddr2_pdown;
 
-	ret = cpuidle_register_driver(&davinci_idle_driver);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to register driver\n");
-		return ret;
-	}
-
-	ret = cpuidle_register_device(device);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to register device\n");
-		cpuidle_unregister_driver(&davinci_idle_driver);
-		return ret;
-	}
-
-	return 0;
+	return cpuidle_register(&davinci_idle_driver, NULL);
 }
 
 static struct platform_driver davinci_cpuidle_driver = {
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 70f94c8..d5dde07 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -72,10 +72,12 @@
 	bool "SAMSUNG EXYNOS5440"
 	default y
 	depends on ARCH_EXYNOS5
+	select ARCH_HAS_OPP
 	select ARM_ARCH_TIMER
 	select AUTO_ZRELADDR
 	select PINCTRL
 	select PINCTRL_EXYNOS5440
+	select PM_OPP
 	help
 	  Enable EXYNOS5440 SoC support
 
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index fcfe025..498a7a23 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -58,7 +58,6 @@
 static struct cpuidle_driver exynos4_idle_driver = {
 	.name			= "exynos4_idle",
 	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
 };
 
 /* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index c4ce090..cb70961 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -30,7 +30,7 @@
 obj-$(CONFIG_CPU_FREQ_IMX)    += cpufreq.o
 
 ifeq ($(CONFIG_CPU_IDLE),y)
-obj-y += cpuidle.o
+obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
 obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
 endif
 
diff --git a/arch/arm/mach-imx/cpufreq.c b/arch/arm/mach-imx/cpufreq.c
index d8c75c3..387dc4c 100644
--- a/arch/arm/mach-imx/cpufreq.c
+++ b/arch/arm/mach-imx/cpufreq.c
@@ -87,13 +87,12 @@
 
 	freqs.old = clk_get_rate(cpu_clk) / 1000;
 	freqs.new = freq_Hz / 1000;
-	freqs.cpu = 0;
 	freqs.flags = 0;
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	ret = set_cpu_freq(freq_Hz);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return ret;
 }
@@ -145,14 +144,11 @@
 	imx_freq_table[i].frequency = CPUFREQ_TABLE_END;
 
 	policy->cur = clk_get_rate(cpu_clk) / 1000;
-	policy->min = policy->cpuinfo.min_freq = cpu_freq_khz_min;
-	policy->max = policy->cpuinfo.max_freq = cpu_freq_khz_max;
 
 	/* Manual states, that PLL stabilizes in two CLK32 periods */
 	policy->cpuinfo.transition_latency = 2 * NANOSECOND / CLK32_FREQ;
 
 	ret = cpufreq_frequency_table_cpuinfo(policy, imx_freq_table);
-
 	if (ret < 0) {
 		printk(KERN_ERR "%s: failed to register i.MXC CPUfreq with error code %d\n",
 		       __func__, ret);
diff --git a/arch/arm/mach-imx/cpuidle-imx5.c b/arch/arm/mach-imx/cpuidle-imx5.c
new file mode 100644
index 0000000..5a47e3c
--- /dev/null
+++ b/arch/arm/mach-imx/cpuidle-imx5.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/module.h>
+#include <asm/system_misc.h>
+
+static int imx5_cpuidle_enter(struct cpuidle_device *dev,
+			      struct cpuidle_driver *drv, int index)
+{
+	arm_pm_idle();
+	return index;
+}
+
+static struct cpuidle_driver imx5_cpuidle_driver = {
+	.name             = "imx5_cpuidle",
+	.owner            = THIS_MODULE,
+	.states[0] = {
+		.enter            = imx5_cpuidle_enter,
+		.exit_latency     = 2,
+		.target_residency = 1,
+		.flags            = CPUIDLE_FLAG_TIME_VALID,
+		.name             = "IMX5 SRPG",
+		.desc             = "CPU state retained,powered off",
+	},
+	.state_count = 1,
+};
+
+int __init imx5_cpuidle_init(void)
+{
+	return cpuidle_register(&imx5_cpuidle_driver, NULL);
+}
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index d533e26..23ddfb6 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -6,7 +6,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/clockchips.h>
 #include <linux/cpuidle.h>
 #include <linux/module.h>
 #include <asm/cpuidle.h>
@@ -21,10 +20,6 @@
 static int imx6q_enter_wait(struct cpuidle_device *dev,
 			    struct cpuidle_driver *drv, int index)
 {
-	int cpu = dev->cpu;
-
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
-
 	if (atomic_inc_return(&master) == num_online_cpus()) {
 		/*
 		 * With this lock, we prevent other cpu to exit and enter
@@ -43,26 +38,13 @@
 	cpu_do_idle();
 done:
 	atomic_dec(&master);
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 
 	return index;
 }
 
-/*
- * For each cpu, setup the broadcast timer because local timer
- * stops for the states other than WFI.
- */
-static void imx6q_setup_broadcast_timer(void *arg)
-{
-	int cpu = smp_processor_id();
-
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
-
 static struct cpuidle_driver imx6q_cpuidle_driver = {
 	.name = "imx6q_cpuidle",
 	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
 	.states = {
 		/* WFI */
 		ARM_CPUIDLE_WFI_STATE,
@@ -70,7 +52,8 @@
 		{
 			.exit_latency = 50,
 			.target_residency = 75,
-			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.flags = CPUIDLE_FLAG_TIME_VALID |
+			         CPUIDLE_FLAG_TIMER_STOP,
 			.enter = imx6q_enter_wait,
 			.name = "WAIT",
 			.desc = "Clock off",
@@ -88,8 +71,5 @@
 	/* Set chicken bit to get a reliable WAIT mode support */
 	imx6q_set_chicken_bit();
 
-	/* Configure the broadcast timer on each cpu */
-	on_each_cpu(imx6q_setup_broadcast_timer, NULL, 1);
-
-	return imx_cpuidle_init(&imx6q_cpuidle_driver);
+	return cpuidle_register(&imx6q_cpuidle_driver, NULL);
 }
diff --git a/arch/arm/mach-imx/cpuidle.c b/arch/arm/mach-imx/cpuidle.c
deleted file mode 100644
index d4cb511..0000000
--- a/arch/arm/mach-imx/cpuidle.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright 2012 Freescale Semiconductor, Inc.
- * Copyright 2012 Linaro Ltd.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-
-#include <linux/cpuidle.h>
-#include <linux/err.h>
-#include <linux/hrtimer.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-static struct cpuidle_device __percpu * imx_cpuidle_devices;
-
-static void __init imx_cpuidle_devices_uninit(void)
-{
-	int cpu_id;
-	struct cpuidle_device *dev;
-
-	for_each_possible_cpu(cpu_id) {
-		dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
-		cpuidle_unregister_device(dev);
-	}
-
-	free_percpu(imx_cpuidle_devices);
-}
-
-int __init imx_cpuidle_init(struct cpuidle_driver *drv)
-{
-	struct cpuidle_device *dev;
-	int cpu_id, ret;
-
-	if (drv->state_count > CPUIDLE_STATE_MAX) {
-		pr_err("%s: state_count exceeds maximum\n", __func__);
-		return -EINVAL;
-	}
-
-	ret = cpuidle_register_driver(drv);
-	if (ret) {
-		pr_err("%s: Failed to register cpuidle driver with error: %d\n",
-			 __func__, ret);
-		return ret;
-	}
-
-	imx_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (imx_cpuidle_devices == NULL) {
-		ret = -ENOMEM;
-		goto unregister_drv;
-	}
-
-	/* initialize state data for each cpuidle_device */
-	for_each_possible_cpu(cpu_id) {
-		dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
-		dev->cpu = cpu_id;
-		dev->state_count = drv->state_count;
-
-		ret = cpuidle_register_device(dev);
-		if (ret) {
-			pr_err("%s: Failed to register cpu %u, error: %d\n",
-				__func__, cpu_id, ret);
-			goto uninit;
-		}
-	}
-
-	return 0;
-
-uninit:
-	imx_cpuidle_devices_uninit();
-
-unregister_drv:
-	cpuidle_unregister_driver(drv);
-	return ret;
-}
diff --git a/arch/arm/mach-imx/cpuidle.h b/arch/arm/mach-imx/cpuidle.h
index e092d13..786f98e 100644
--- a/arch/arm/mach-imx/cpuidle.h
+++ b/arch/arm/mach-imx/cpuidle.h
@@ -10,18 +10,16 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/cpuidle.h>
-
 #ifdef CONFIG_CPU_IDLE
-extern int imx_cpuidle_init(struct cpuidle_driver *drv);
+extern int imx5_cpuidle_init(void);
 extern int imx6q_cpuidle_init(void);
 #else
-static inline int imx_cpuidle_init(struct cpuidle_driver *drv)
+static inline int imx5_cpuidle_init(void)
 {
-	return -ENODEV;
+	return 0;
 }
 static inline int imx6q_cpuidle_init(void)
 {
-	return -ENODEV;
+	return 0;
 }
 #endif
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index f67fd7e..82e79c6 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -149,33 +149,6 @@
 	imx5_cpu_do_idle();
 }
 
-static int imx5_cpuidle_enter(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv, int idx)
-{
-	int ret;
-
-	ret = imx5_cpu_do_idle();
-	if (ret < 0)
-		return ret;
-
-	return idx;
-}
-
-static struct cpuidle_driver imx5_cpuidle_driver = {
-	.name			= "imx5_cpuidle",
-	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
-	.states[0]	= {
-		.enter			= imx5_cpuidle_enter,
-		.exit_latency		= 2,
-		.target_residency	= 1,
-		.flags			= CPUIDLE_FLAG_TIME_VALID,
-		.name			= "IMX5 SRPG",
-		.desc			= "CPU state retained,powered off",
-	},
-	.state_count		= 1,
-};
-
 static int __init imx5_pm_common_init(void)
 {
 	int ret;
@@ -193,8 +166,7 @@
 	/* Set the registers to the default cpu idle state. */
 	mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE);
 
-	imx_cpuidle_init(&imx5_cpuidle_driver);
-	return 0;
+	return imx5_cpuidle_init();
 }
 
 void __init imx51_pm_init(void)
diff --git a/arch/arm/mach-integrator/Makefile b/arch/arm/mach-integrator/Makefile
index 5521d18..d14d6b7 100644
--- a/arch/arm/mach-integrator/Makefile
+++ b/arch/arm/mach-integrator/Makefile
@@ -9,5 +9,4 @@
 obj-$(CONFIG_ARCH_INTEGRATOR_CP)	+= integrator_cp.o
 
 obj-$(CONFIG_PCI)			+= pci_v3.o pci.o
-obj-$(CONFIG_CPU_FREQ_INTEGRATOR)	+= cpu.o
 obj-$(CONFIG_INTEGRATOR_IMPD1)		+= impd1.o
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index d6ba13e..14522d0 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -249,7 +249,6 @@
 extern int omap4_finish_suspend(unsigned long cpu_state);
 extern void omap4_cpu_resume(void);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
-extern u32 omap4_mpuss_read_prev_context_state(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
 					unsigned int power_state)
@@ -277,10 +276,6 @@
 static inline void omap4_cpu_resume(void)
 {}
 
-static inline u32 omap4_mpuss_read_prev_context_state(void)
-{
-	return 0;
-}
 #endif
 
 struct omap_sdrc_params;
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 80392fc..cca045c 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -26,6 +26,7 @@
 #include <linux/cpuidle.h>
 #include <linux/export.h>
 #include <linux/cpu_pm.h>
+#include <asm/cpuidle.h>
 
 #include "powerdomain.h"
 #include "clockdomain.h"
@@ -99,11 +100,15 @@
 	},
 };
 
-/* Private functions */
-
-static int __omap3_enter_idle(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv,
-				int index)
+/**
+ * omap3_enter_idle - Programs OMAP3 to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
+ */
+static int omap3_enter_idle(struct cpuidle_device *dev,
+			    struct cpuidle_driver *drv,
+			    int index)
 {
 	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
 
@@ -149,22 +154,6 @@
 }
 
 /**
- * omap3_enter_idle - Programs OMAP3 to enter the specified state
- * @dev: cpuidle device
- * @drv: cpuidle driver
- * @index: the index of state to be entered
- *
- * Called from the CPUidle framework to program the device to the
- * specified target state selected by the governor.
- */
-static inline int omap3_enter_idle(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv,
-				int index)
-{
-	return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
-}
-
-/**
  * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
  * @drv: cpuidle driver
@@ -271,11 +260,9 @@
 	return ret;
 }
 
-static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
-
 static struct cpuidle_driver omap3_idle_driver = {
-	.name =		"omap3_idle",
-	.owner =	THIS_MODULE,
+	.name             = "omap3_idle",
+	.owner            = THIS_MODULE,
 	.states = {
 		{
 			.enter		  = omap3_enter_idle_bm,
@@ -348,8 +335,6 @@
  */
 int __init omap3_idle_init(void)
 {
-	struct cpuidle_device *dev;
-
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
 	core_pd = pwrdm_lookup("core_pwrdm");
 	per_pd = pwrdm_lookup("per_pwrdm");
@@ -358,16 +343,5 @@
 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
 		return -ENODEV;
 
-	cpuidle_register_driver(&omap3_idle_driver);
-
-	dev = &per_cpu(omap3_idle_dev, smp_processor_id());
-	dev->cpu = 0;
-
-	if (cpuidle_register_device(dev)) {
-		printk(KERN_ERR "%s: CPUidle register device failed\n",
-		       __func__);
-		return -EIO;
-	}
-
-	return 0;
+	return cpuidle_register(&omap3_idle_driver, NULL);
 }
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index d639aef..5a286b5 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -1,7 +1,7 @@
 /*
- * OMAP4 CPU idle Routines
+ * OMAP4+ CPU idle Routines
  *
- * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011-2013 Texas Instruments, Inc.
  * Santosh Shilimkar <santosh.shilimkar@ti.com>
  * Rajendra Nayak <rnayak@ti.com>
  *
@@ -14,8 +14,8 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
-#include <linux/clockchips.h>
 
+#include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
 
 #include "common.h"
@@ -24,13 +24,13 @@
 #include "clockdomain.h"
 
 /* Machine specific information */
-struct omap4_idle_statedata {
+struct idle_statedata {
 	u32 cpu_state;
 	u32 mpu_logic_state;
 	u32 mpu_state;
 };
 
-static struct omap4_idle_statedata omap4_idle_data[] = {
+static struct idle_statedata omap4_idle_data[] = {
 	{
 		.cpu_state = PWRDM_POWER_ON,
 		.mpu_state = PWRDM_POWER_ON,
@@ -53,11 +53,12 @@
 
 static atomic_t abort_barrier;
 static bool cpu_done[NR_CPUS];
+static struct idle_statedata *state_ptr = &omap4_idle_data[0];
 
 /* Private functions */
 
 /**
- * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
+ * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
  * @dev: cpuidle device
  * @drv: cpuidle driver
  * @index: the index of state to be entered
@@ -66,7 +67,7 @@
  * specified low power state selected by the governor.
  * Returns the amount of time spent in the low power state.
  */
-static int omap4_enter_idle_simple(struct cpuidle_device *dev,
+static int omap_enter_idle_simple(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
@@ -77,12 +78,11 @@
 	return index;
 }
 
-static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
+static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
-	struct omap4_idle_statedata *cx = &omap4_idle_data[index];
-	int cpu_id = smp_processor_id();
+	struct idle_statedata *cx = state_ptr + index;
 
 	local_fiq_disable();
 
@@ -109,8 +109,6 @@
 		}
 	}
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
-
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
@@ -149,11 +147,10 @@
 	 * Call idle CPU cluster PM exit notifier chain
 	 * to restore GIC and wakeupgen context.
 	 */
-	if (omap4_mpuss_read_prev_context_state())
+	if ((cx->mpu_state == PWRDM_POWER_RET) &&
+		(cx->mpu_logic_state == PWRDM_POWER_OFF))
 		cpu_cluster_pm_exit();
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
-
 fail:
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 	cpu_done[dev->cpu] = false;
@@ -163,49 +160,38 @@
 	return index;
 }
 
-/*
- * For each cpu, setup the broadcast timer because local timers
- * stops for the states above C1.
- */
-static void omap_setup_broadcast_timer(void *arg)
-{
-	int cpu = smp_processor_id();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
-
-static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
-
 static struct cpuidle_driver omap4_idle_driver = {
 	.name				= "omap4_idle",
 	.owner				= THIS_MODULE,
-	.en_core_tk_irqen		= 1,
 	.states = {
 		{
 			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
 			.exit_latency = 2 + 2,
 			.target_residency = 5,
 			.flags = CPUIDLE_FLAG_TIME_VALID,
-			.enter = omap4_enter_idle_simple,
+			.enter = omap_enter_idle_simple,
 			.name = "C1",
-			.desc = "MPUSS ON"
+			.desc = "CPUx ON, MPUSS ON"
 		},
 		{
 			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 			.exit_latency = 328 + 440,
 			.target_residency = 960,
-			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
-			.enter = omap4_enter_idle_coupled,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+			         CPUIDLE_FLAG_TIMER_STOP,
+			.enter = omap_enter_idle_coupled,
 			.name = "C2",
-			.desc = "MPUSS CSWR",
+			.desc = "CPUx OFF, MPUSS CSWR",
 		},
 		{
 			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 			.exit_latency = 460 + 518,
 			.target_residency = 1100,
-			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
-			.enter = omap4_enter_idle_coupled,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
+			         CPUIDLE_FLAG_TIMER_STOP,
+			.enter = omap_enter_idle_coupled,
 			.name = "C3",
-			.desc = "MPUSS OSWR",
+			.desc = "CPUx OFF, MPUSS OSWR",
 		},
 	},
 	.state_count = ARRAY_SIZE(omap4_idle_data),
@@ -215,16 +201,13 @@
 /* Public functions */
 
 /**
- * omap4_idle_init - Init routine for OMAP4 idle
+ * omap4_idle_init - Init routine for OMAP4+ idle
  *
- * Registers the OMAP4 specific cpuidle driver to the cpuidle
+ * Registers the OMAP4+ specific cpuidle driver to the cpuidle
  * framework with the valid set of states.
  */
 int __init omap4_idle_init(void)
 {
-	struct cpuidle_device *dev;
-	unsigned int cpu_id = 0;
-
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
 	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
 	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
@@ -236,22 +219,5 @@
 	if (!cpu_clkdm[0] || !cpu_clkdm[1])
 		return -ENODEV;
 
-	/* Configure the broadcast timer on each cpu */
-	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
-
-	for_each_cpu(cpu_id, cpu_online_mask) {
-		dev = &per_cpu(omap4_idle_dev, cpu_id);
-		dev->cpu = cpu_id;
-#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
-		dev->coupled_cpus = *cpu_online_mask;
-#endif
-		cpuidle_register_driver(&omap4_idle_driver);
-
-		if (cpuidle_register_device(dev)) {
-			pr_err("%s: CPUidle register failed\n", __func__);
-			return -EIO;
-		}
-	}
-
-	return 0;
+	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
 }
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 8bcb64b..e80327b 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -139,20 +139,6 @@
 	}
 }
 
-/**
- * omap4_mpuss_read_prev_context_state:
- * Function returns the MPUSS previous context state
- */
-u32 omap4_mpuss_read_prev_context_state(void)
-{
-	u32 reg;
-
-	reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
-		OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
-	reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
-	return reg;
-}
-
 /*
  * Store the CPU cluster state for L2X0 low power operations.
  */
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index dec5533..e742118 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -264,6 +264,12 @@
 	omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
 }
 
+static inline void omap_init_cpufreq(void)
+{
+	struct platform_device_info devinfo = { .name = "omap-cpufreq", };
+	platform_device_register_full(&devinfo);
+}
+
 static int __init omap2_common_pm_init(void)
 {
 	if (!of_have_populated_dt())
@@ -293,6 +299,9 @@
 
 		/* Smartreflex device init */
 		omap_devinit_smartreflex();
+
+		/* cpufreq dummy device instantiation */
+		omap_init_cpufreq();
 	}
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 12c5005..648867a 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -7,12 +7,6 @@
 				   time.o reset.o
 obj-$(CONFIG_PM)		+= pm.o sleep.o standby.o
 
-ifeq ($(CONFIG_CPU_FREQ),y)
-obj-$(CONFIG_PXA25x)		+= cpufreq-pxa2xx.o
-obj-$(CONFIG_PXA27x)		+= cpufreq-pxa2xx.o
-obj-$(CONFIG_PXA3xx)		+= cpufreq-pxa3xx.o
-endif
-
 # Generic drivers that other drivers may depend upon
 
 # SoC-specific code
diff --git a/arch/arm/mach-pxa/include/mach/generic.h b/arch/arm/mach-pxa/include/mach/generic.h
new file mode 100644
index 0000000..665542e
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/generic.h
@@ -0,0 +1 @@
+#include "../../generic.h"
diff --git a/arch/arm/mach-s3c24xx/cpufreq.c b/arch/arm/mach-s3c24xx/cpufreq.c
index 5f181e7..3c0e78e 100644
--- a/arch/arm/mach-s3c24xx/cpufreq.c
+++ b/arch/arm/mach-s3c24xx/cpufreq.c
@@ -204,7 +204,6 @@
 	freqs.old = cpu_cur.freq;
 	freqs.new = cpu_new.freq;
 
-	freqs.freqs.cpu = 0;
 	freqs.freqs.old = cpu_cur.freq.armclk / 1000;
 	freqs.freqs.new = cpu_new.freq.armclk / 1000;
 
@@ -218,9 +217,7 @@
 	s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
 
 	/* start the frequency change */
-
-	if (policy)
-		cpufreq_notify_transition(&freqs.freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
 
 	/* If hclk is staying the same, then we do not need to
 	 * re-write the IO or the refresh timings whilst we are changing
@@ -264,8 +261,7 @@
 	local_irq_restore(flags);
 
 	/* notify everyone we've done this */
-	if (policy)
-		cpufreq_notify_transition(&freqs.freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
 
 	s3c_freq_dbg("%s: finished\n", __func__);
 	return 0;
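
The API change visible here (and in the pasemi and powermac hunks further
down) is that cpufreq_notify_transition() now takes the policy explicitly and
the core walks policy->cpus itself, so drivers stop filling in freqs.cpu by
hand.  A bare-bones frequency-change path using the new convention might look
like this; the demo_ name and the PLL comment are placeholders:

	#include <linux/cpufreq.h>

	static int demo_set_target(struct cpufreq_policy *policy,
				   unsigned int new_khz)
	{
		struct cpufreq_freqs freqs = {
			.old = policy->cur,
			.new = new_khz,
			/* .cpu is filled in per CPU by the cpufreq core */
		};

		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
		/* reprogram the PLL / clock dividers for new_khz here */
		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

		return 0;
	}
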
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index ead5fab..3c8ab07 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -40,12 +40,9 @@
 	return index;
 }
 
-static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
-
 static struct cpuidle_driver s3c64xx_cpuidle_driver = {
 	.name	= "s3c64xx_cpuidle",
 	.owner  = THIS_MODULE,
-	.en_core_tk_irqen = 1,
 	.states = {
 		{
 			.enter            = s3c64xx_enter_idle,
@@ -61,16 +58,6 @@
 
 static int __init s3c64xx_init_cpuidle(void)
 {
-	int ret;
-
-	cpuidle_register_driver(&s3c64xx_cpuidle_driver);
-
-	ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
-	if (ret) {
-		pr_err("Failed to register cpuidle device: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
+	return cpuidle_register(&s3c64xx_cpuidle_driver, NULL);
 }
 device_initcall(s3c64xx_init_cpuidle);
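
Dropping .en_core_tk_irqen here (and in the shmobile, tegra, ux500, apm and
sh drivers below) reflects that the cpuidle core now does the idle-time
accounting unconditionally; the flag that made it optional is gone, which is
also why the pseries driver further down stops computing dev->last_residency
by hand.  An enter() callback therefore shrinks to roughly the following,
shown as an ARM-flavoured sketch with a made-up demo_enter name:

	#include <linux/cpuidle.h>
	#include <asm/proc-fns.h>	/* cpu_do_idle(), ARM */

	static int demo_enter(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
	{
		/* the core measures residency around this call */
		cpu_do_idle();		/* wait for interrupt */
		return index;
	}
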
diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig
index ca14dbd..04f9784 100644
--- a/arch/arm/mach-sa1100/Kconfig
+++ b/arch/arm/mach-sa1100/Kconfig
@@ -4,7 +4,7 @@
 
 config SA1100_ASSABET
 	bool "Assabet"
-	select CPU_FREQ_SA1110
+	select ARM_SA1110_CPUFREQ
 	help
 	  Say Y here if you are using the Intel(R) StrongARM(R) SA-1110
 	  Microprocessor Development Board (also known as the Assabet).
@@ -20,7 +20,7 @@
 
 config SA1100_CERF
 	bool "CerfBoard"
-	select CPU_FREQ_SA1110
+	select ARM_SA1110_CPUFREQ
 	help
 	  The Intrinsyc CerfBoard is based on the StrongARM 1110 (Discontinued).
 	  More information is available at:
@@ -47,7 +47,7 @@
 
 config SA1100_COLLIE
 	bool "Sharp Zaurus SL5500"
-	# FIXME: select CPU_FREQ_SA11x0
+	# FIXME: select ARM_SA11x0_CPUFREQ
 	select SHARP_LOCOMO
 	select SHARP_PARAM
 	select SHARP_SCOOP
@@ -56,7 +56,7 @@
 
 config SA1100_H3100
 	bool "Compaq iPAQ H3100"
-	select CPU_FREQ_SA1110
+	select ARM_SA1110_CPUFREQ
 	select HTC_EGPIO
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
@@ -67,7 +67,7 @@
 
 config SA1100_H3600
 	bool "Compaq iPAQ H3600/H3700"
-	select CPU_FREQ_SA1110
+	select ARM_SA1110_CPUFREQ
 	select HTC_EGPIO
 	help
 	  Say Y here if you intend to run this kernel on the Compaq iPAQ
@@ -78,7 +78,7 @@
 
 config SA1100_BADGE4
 	bool "HP Labs BadgePAD 4"
-	select CPU_FREQ_SA1100
+	select ARM_SA1100_CPUFREQ
 	select SA1111
 	help
 	  Say Y here if you want to build a kernel for the HP Laboratories
@@ -86,7 +86,7 @@
 
 config SA1100_JORNADA720
 	bool "HP Jornada 720"
-	# FIXME: select CPU_FREQ_SA11x0
+	# FIXME: select ARM_SA11x0_CPUFREQ
 	select SA1111
 	help
 	  Say Y here if you want to build a kernel for the HP Jornada 720
@@ -105,14 +105,14 @@
 
 config SA1100_HACKKIT
 	bool "HackKit Core CPU Board"
-	select CPU_FREQ_SA1100
+	select ARM_SA1100_CPUFREQ
 	help
 	  Say Y here to support the HackKit Core CPU Board
 	  <http://hackkit.eletztrick.de>;
 
 config SA1100_LART
 	bool "LART"
-	select CPU_FREQ_SA1100
+	select ARM_SA1100_CPUFREQ
 	help
 	  Say Y here if you are using the Linux Advanced Radio Terminal
 	  (also known as the LART).  See <http://www.lartmaker.nl/> for
@@ -120,7 +120,7 @@
 
 config SA1100_NANOENGINE
 	bool "nanoEngine"
-	select CPU_FREQ_SA1110
+	select ARM_SA1110_CPUFREQ
 	select PCI
 	select PCI_NANOENGINE
 	help
@@ -130,7 +130,7 @@
 
 config SA1100_PLEB
 	bool "PLEB"
-	select CPU_FREQ_SA1100
+	select ARM_SA1100_CPUFREQ
 	help
 	  Say Y here if you are using version 1 of the Portable Linux
 	  Embedded Board (also known as PLEB).
@@ -139,7 +139,7 @@
 
 config SA1100_SHANNON
 	bool "Shannon"
-	select CPU_FREQ_SA1100
+	select ARM_SA1100_CPUFREQ
 	help
 	  The Shannon (also known as a Tuxscreen, and also as a IS2630) was a
 	  limited edition webphone produced by Philips. The Shannon is a SA1100
@@ -148,7 +148,7 @@
 
 config SA1100_SIMPAD
 	bool "Simpad"
-	select CPU_FREQ_SA1110
+	select ARM_SA1110_CPUFREQ
 	help
 	  The SIEMENS webpad SIMpad is based on the StrongARM 1110. There
 	  are two different versions CL4 and SL4. CL4 has 32MB RAM and 16MB
diff --git a/arch/arm/mach-sa1100/Makefile b/arch/arm/mach-sa1100/Makefile
index 1aed9e7..2732eef 100644
--- a/arch/arm/mach-sa1100/Makefile
+++ b/arch/arm/mach-sa1100/Makefile
@@ -8,9 +8,6 @@
 obj-n :=
 obj-  :=
 
-obj-$(CONFIG_CPU_FREQ_SA1100)		+= cpu-sa1100.o
-obj-$(CONFIG_CPU_FREQ_SA1110)		+= cpu-sa1110.o
-
 # Specific board support
 obj-$(CONFIG_SA1100_ASSABET)		+= assabet.o
 obj-$(CONFIG_ASSABET_NEPONSET)		+= neponset.o
diff --git a/arch/arm/mach-sa1100/include/mach/generic.h b/arch/arm/mach-sa1100/include/mach/generic.h
new file mode 100644
index 0000000..665542e
--- /dev/null
+++ b/arch/arm/mach-sa1100/include/mach/generic.h
@@ -0,0 +1 @@
+#include "../../generic.h"
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 9e05026..0afeb5c 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -16,39 +16,22 @@
 #include <asm/cpuidle.h>
 #include <asm/io.h>
 
-int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv,
-		       int index)
-{
-	cpu_do_idle();
-	return 0;
-}
-
-static struct cpuidle_device shmobile_cpuidle_dev;
 static struct cpuidle_driver shmobile_cpuidle_default_driver = {
 	.name			= "shmobile_cpuidle",
 	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
 	.states[0]		= ARM_CPUIDLE_WFI_STATE,
-	.states[0].enter	= shmobile_enter_wfi,
 	.safe_state_index	= 0, /* C1 */
 	.state_count		= 1,
 };
 
 static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
 
-void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
+void __init shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
 {
 	cpuidle_drv = drv;
 }
 
-int shmobile_cpuidle_init(void)
+int __init shmobile_cpuidle_init(void)
 {
-	struct cpuidle_device *dev = &shmobile_cpuidle_dev;
-
-	cpuidle_register_driver(cpuidle_drv);
-
-	dev->state_count = cpuidle_drv->state_count;
-	cpuidle_register_device(dev);
-
-	return 0;
+	return cpuidle_register(cpuidle_drv, NULL);
 }
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index e48606d..362f9b2 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -13,9 +13,6 @@
 extern void shmobile_handle_irq_intc(struct pt_regs *);
 extern struct platform_suspend_ops shmobile_suspend_ops;
 struct cpuidle_driver;
-struct cpuidle_device;
-extern int shmobile_enter_wfi(struct cpuidle_device *dev,
-			      struct cpuidle_driver *drv, int index);
 extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);
 
 extern void sh7372_init_irq(void);
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index a0826a4..dec9293 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -410,11 +410,9 @@
 static struct cpuidle_driver sh7372_cpuidle_driver = {
 	.name			= "sh7372_cpuidle",
 	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
 	.state_count		= 5,
 	.safe_state_index	= 0, /* C1 */
 	.states[0] = ARM_CPUIDLE_WFI_STATE,
-	.states[0].enter = shmobile_enter_wfi,
 	.states[1] = {
 		.name = "C2",
 		.desc = "Core Standby Mode",
@@ -450,12 +448,12 @@
 	},
 };
 
-static void sh7372_cpuidle_init(void)
+static void __init sh7372_cpuidle_init(void)
 {
 	shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
 }
 #else
-static void sh7372_cpuidle_init(void) {}
+static void __init sh7372_cpuidle_init(void) {}
 #endif
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index f6b46ae..09b578f 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -24,7 +24,6 @@
 endif
 obj-$(CONFIG_SMP)			+= platsmp.o headsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)               += hotplug.o
-obj-$(CONFIG_CPU_FREQ)                  += cpu-tegra.o
 obj-$(CONFIG_TEGRA_PCI)			+= pcie.o
 
 obj-$(CONFIG_ARCH_TEGRA_2x_SOC)		+= board-dt-tegra20.o
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index 0f4e8c4..1d1c602 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -23,39 +23,13 @@
 static struct cpuidle_driver tegra_idle_driver = {
 	.name = "tegra_idle",
 	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
 	.state_count = 1,
 	.states = {
 		[0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
 	},
 };
 
-static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
-
 int __init tegra114_cpuidle_init(void)
 {
-	int ret;
-	unsigned int cpu;
-	struct cpuidle_device *dev;
-	struct cpuidle_driver *drv = &tegra_idle_driver;
-
-	ret = cpuidle_register_driver(&tegra_idle_driver);
-	if (ret) {
-		pr_err("CPUidle driver registration failed\n");
-		return ret;
-	}
-
-	for_each_possible_cpu(cpu) {
-		dev = &per_cpu(tegra_idle_device, cpu);
-		dev->cpu = cpu;
-
-		dev->state_count = drv->state_count;
-		ret = cpuidle_register_device(dev);
-		if (ret) {
-			pr_err("CPU%u: CPUidle device registration failed\n",
-				cpu);
-			return ret;
-		}
-	}
-	return 0;
+	return cpuidle_register(&tegra_idle_driver, NULL);
 }
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 825ced4..590ec25 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -43,32 +43,33 @@
 static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
 				    struct cpuidle_driver *drv,
 				    int index);
+#define TEGRA20_MAX_STATES 2
+#else
+#define TEGRA20_MAX_STATES 1
 #endif
 
-static struct cpuidle_state tegra_idle_states[] = {
-	[0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
-#ifdef CONFIG_PM_SLEEP
-	[1] = {
-		.enter			= tegra20_idle_lp2_coupled,
-		.exit_latency		= 5000,
-		.target_residency	= 10000,
-		.power_usage		= 0,
-		.flags			= CPUIDLE_FLAG_TIME_VALID |
-					  CPUIDLE_FLAG_COUPLED,
-		.name			= "powered-down",
-		.desc			= "CPU power gated",
-	},
-#endif
-};
-
 static struct cpuidle_driver tegra_idle_driver = {
 	.name = "tegra_idle",
 	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
+	.states = {
+		ARM_CPUIDLE_WFI_STATE_PWR(600),
+#ifdef CONFIG_PM_SLEEP
+		{
+			.enter            = tegra20_idle_lp2_coupled,
+			.exit_latency     = 5000,
+			.target_residency = 10000,
+			.power_usage      = 0,
+			.flags            = CPUIDLE_FLAG_TIME_VALID |
+			CPUIDLE_FLAG_COUPLED,
+			.name             = "powered-down",
+			.desc             = "CPU power gated",
+		},
+#endif
+	},
+	.state_count = TEGRA20_MAX_STATES,
+	.safe_state_index = 0,
 };
 
-static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
-
 #ifdef CONFIG_PM_SLEEP
 #ifdef CONFIG_SMP
 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
@@ -217,39 +218,8 @@
 
 int __init tegra20_cpuidle_init(void)
 {
-	int ret;
-	unsigned int cpu;
-	struct cpuidle_device *dev;
-	struct cpuidle_driver *drv = &tegra_idle_driver;
-
 #ifdef CONFIG_PM_SLEEP
 	tegra_tear_down_cpu = tegra20_tear_down_cpu;
 #endif
-
-	drv->state_count = ARRAY_SIZE(tegra_idle_states);
-	memcpy(drv->states, tegra_idle_states,
-			drv->state_count * sizeof(drv->states[0]));
-
-	ret = cpuidle_register_driver(&tegra_idle_driver);
-	if (ret) {
-		pr_err("CPUidle driver registration failed\n");
-		return ret;
-	}
-
-	for_each_possible_cpu(cpu) {
-		dev = &per_cpu(tegra_idle_device, cpu);
-		dev->cpu = cpu;
-#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
-		dev->coupled_cpus = *cpu_possible_mask;
-#endif
-
-		dev->state_count = drv->state_count;
-		ret = cpuidle_register_device(dev);
-		if (ret) {
-			pr_err("CPU%u: CPUidle device registration failed\n",
-				cpu);
-			return ret;
-		}
-	}
-	return 0;
+	return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
 }
diff --git a/arch/arm/mach-tegra/cpuidle-tegra30.c b/arch/arm/mach-tegra/cpuidle-tegra30.c
index 8b50cf4..36dc2be 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra30.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra30.c
@@ -43,7 +43,6 @@
 static struct cpuidle_driver tegra_idle_driver = {
 	.name = "tegra_idle",
 	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
 #ifdef CONFIG_PM_SLEEP
 	.state_count = 2,
 #else
@@ -65,8 +64,6 @@
 	},
 };
 
-static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
-
 #ifdef CONFIG_PM_SLEEP
 static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
 					   struct cpuidle_driver *drv,
@@ -157,32 +154,8 @@
 
 int __init tegra30_cpuidle_init(void)
 {
-	int ret;
-	unsigned int cpu;
-	struct cpuidle_device *dev;
-	struct cpuidle_driver *drv = &tegra_idle_driver;
-
 #ifdef CONFIG_PM_SLEEP
 	tegra_tear_down_cpu = tegra30_tear_down_cpu;
 #endif
-
-	ret = cpuidle_register_driver(&tegra_idle_driver);
-	if (ret) {
-		pr_err("CPUidle driver registration failed\n");
-		return ret;
-	}
-
-	for_each_possible_cpu(cpu) {
-		dev = &per_cpu(tegra_idle_device, cpu);
-		dev->cpu = cpu;
-
-		dev->state_count = drv->state_count;
-		ret = cpuidle_register_device(dev);
-		if (ret) {
-			pr_err("CPU%u: CPUidle device registration failed\n",
-				cpu);
-			return ret;
-		}
-	}
-	return 0;
+	return cpuidle_register(&tegra_idle_driver, NULL);
 }
diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c
index ce91493..488e074 100644
--- a/arch/arm/mach-ux500/cpuidle.c
+++ b/arch/arm/mach-ux500/cpuidle.c
@@ -11,7 +11,6 @@
 
 #include <linux/module.h>
 #include <linux/cpuidle.h>
-#include <linux/clockchips.h>
 #include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <linux/smp.h>
@@ -22,7 +21,6 @@
 
 static atomic_t master = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(master_lock);
-static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);
 
 static inline int ux500_enter_idle(struct cpuidle_device *dev,
 				   struct cpuidle_driver *drv, int index)
@@ -30,8 +28,6 @@
 	int this_cpu = smp_processor_id();
 	bool recouple = false;
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);
-
 	if (atomic_inc_return(&master) == num_online_cpus()) {
 
 		/* With this lock, we prevent the other cpu to exit and enter
@@ -91,22 +87,20 @@
 		spin_unlock(&master_lock);
 	}
 
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);
-
 	return index;
 }
 
 static struct cpuidle_driver ux500_idle_driver = {
 	.name = "ux500_idle",
 	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
 	.states = {
 		ARM_CPUIDLE_WFI_STATE,
 		{
 			.enter		  = ux500_enter_idle,
 			.exit_latency	  = 70,
 			.target_residency = 260,
-			.flags		  = CPUIDLE_FLAG_TIME_VALID,
+			.flags		  = CPUIDLE_FLAG_TIME_VALID |
+			                    CPUIDLE_FLAG_TIMER_STOP,
 			.name		  = "ApIdle",
 			.desc		  = "ARM Retention",
 		},
@@ -115,59 +109,13 @@
 	.state_count = 2,
 };
 
-/*
- * For each cpu, setup the broadcast timer because we will
- * need to migrate the timers for the states >= ApIdle.
- */
-static void ux500_setup_broadcast_timer(void *arg)
-{
-	int cpu = smp_processor_id();
-	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
-}
-
 int __init ux500_idle_init(void)
 {
-	int ret, cpu;
-	struct cpuidle_device *device;
-
         /* Configure wake up reasons */
 	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
 			     PRCMU_WAKEUP(ABB));
 
-	/*
-	 * Configure the timer broadcast for each cpu, that must
-	 * be done from the cpu context, so we use a smp cross
-	 * call with 'on_each_cpu'.
-	 */
-	on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);
-
-	ret = cpuidle_register_driver(&ux500_idle_driver);
-	if (ret) {
-		printk(KERN_ERR "failed to register ux500 idle driver\n");
-		return ret;
-	}
-
-	for_each_online_cpu(cpu) {
-		device = &per_cpu(ux500_cpuidle_device, cpu);
-		device->cpu = cpu;
-		ret = cpuidle_register_device(device);
-		if (ret) {
-			printk(KERN_ERR "Failed to register cpuidle "
-			       "device for cpu%d\n", cpu);
-			goto out_unregister;
-		}
-	}
-out:
-	return ret;
-
-out_unregister:
-	for_each_online_cpu(cpu) {
-		device = &per_cpu(ux500_cpuidle_device, cpu);
-		cpuidle_unregister_device(device);
-	}
-
-	cpuidle_unregister_driver(&ux500_idle_driver);
-	goto out;
+	return cpuidle_register(&ux500_idle_driver, NULL);
 }
 
 device_initcall(ux500_idle_init);
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 52d315b..0f1c5e5 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -17,6 +17,9 @@
 	select NO_IOPORT
 	select PLAT_VERSATILE
 	select PLAT_VERSATILE_CLCD
+	select POWER_RESET
+	select POWER_RESET_VEXPRESS
+	select POWER_SUPPLY
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 	select VEXPRESS_CONFIG
 	help
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 80b6497..42703e8 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -4,7 +4,7 @@
 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
 	-I$(srctree)/arch/arm/plat-versatile/include
 
-obj-y					:= v2m.o reset.o
+obj-y					:= v2m.o
 obj-$(CONFIG_ARCH_VEXPRESS_CA9X4)	+= ct-ca9x4.o
 obj-$(CONFIG_SMP)			+= platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)		+= hotplug.o
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index c5e20b5..eb2b3a6 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -363,8 +363,6 @@
 	for (i = 0; i < ARRAY_SIZE(v2m_amba_devs); i++)
 		amba_device_register(v2m_amba_devs[i], &iomem_resource);
 
-	pm_power_off = vexpress_power_off;
-
 	ct_desc->init_tile();
 }
 
@@ -376,7 +374,6 @@
 	.init_irq	= v2m_init_irq,
 	.init_time	= v2m_timer_init,
 	.init_machine	= v2m_init,
-	.restart	= vexpress_restart,
 MACHINE_END
 
 static struct map_desc v2m_rs1_io_desc __initdata = {
@@ -470,7 +467,6 @@
 {
 	l2x0_of_init(0x00400000, 0xfe0fffff);
 	of_platform_populate(NULL, v2m_dt_bus_match, NULL, NULL);
-	pm_power_off = vexpress_power_off;
 }
 
 static const char * const v2m_dt_match[] __initconst = {
@@ -487,5 +483,4 @@
 	.init_irq	= irqchip_init,
 	.init_time	= v2m_dt_timer_init,
 	.init_machine	= v2m_dt_init,
-	.restart	= vexpress_restart,
 MACHINE_END
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index c1a868d..22c4030 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -250,20 +250,7 @@
 	def_bool y
 
 menu "CPU Frequency scaling"
-
 source "drivers/cpufreq/Kconfig"
-
-config CPU_FREQ_AT32AP
-	bool "CPU frequency driver for AT32AP"
-	depends on CPU_FREQ && PLATFORM_AT32AP
-	default n
-	help
-	  This enables the CPU frequency driver for AT32AP processors.
-
-	  For details, take a look in <file:Documentation/cpu-freq>.
-
-	  If in doubt, say N.
-
 endmenu
 
 endmenu
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index f4025db..d5aff36 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -26,7 +26,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index c76a49b..4abcf43 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -28,7 +28,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 2d8ab08..18f3fa0 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -27,7 +27,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index b189e0c..06e389c 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -23,7 +23,7 @@
 CONFIG_CPU_FREQ_GOV_POWERSAVE=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 2e4de42..2518a13 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -26,7 +26,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index fad3cd2..245ef6b 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -29,7 +29,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index 2998623..fa6cbac 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -28,7 +28,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index a582465..bbd5131 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -25,7 +25,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 57a79df..c1cd726 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -26,7 +26,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 1a49bd8..754ae56 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -26,7 +26,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 206a1b6..58589d8 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -26,7 +26,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_NET=y
 CONFIG_PACKET=y
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index 0421498..57788a4 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -27,7 +27,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index 82f24eb..ba7c31e 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -31,7 +31,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 1bee51f..0a8bfdc 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -24,7 +24,7 @@
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_AT32AP=y
+CONFIG_AVR32_AT32AP_CPUFREQ=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile
index 514c9a9..fc09ec4 100644
--- a/arch/avr32/mach-at32ap/Makefile
+++ b/arch/avr32/mach-at32ap/Makefile
@@ -1,7 +1,6 @@
 obj-y				+= pdc.o clock.o intc.o extint.o pio.o hsmc.o
 obj-y				+= hmatrix.o
 obj-$(CONFIG_CPU_AT32AP700X)	+= at32ap700x.o pm-at32ap700x.o
-obj-$(CONFIG_CPU_FREQ_AT32AP)	+= cpufreq.o
 obj-$(CONFIG_PM)		+= pm.o
 
 ifeq ($(CONFIG_PM_DEBUG),y)
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index 75f0ba2..675466d 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -10,7 +10,6 @@
 ifneq ($(CONFIG_BF60x),y)
 obj-$(CONFIG_PM)	  += dpmc_modes.o
 endif
-obj-$(CONFIG_CPU_FREQ)    += cpufreq.o
 obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
 obj-$(CONFIG_SMP)         += smp.o
 obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
diff --git a/arch/cris/arch-v32/mach-a3/Makefile b/arch/cris/arch-v32/mach-a3/Makefile
index d366e08..18a2271 100644
--- a/arch/cris/arch-v32/mach-a3/Makefile
+++ b/arch/cris/arch-v32/mach-a3/Makefile
@@ -3,7 +3,6 @@
 #
 
 obj-y   := dma.o pinmux.o io.o arbiter.o
-obj-$(CONFIG_CPU_FREQ)   += cpufreq.o
 
 clean:
 
diff --git a/arch/cris/arch-v32/mach-fs/Makefile b/arch/cris/arch-v32/mach-fs/Makefile
index d366e08..18a2271 100644
--- a/arch/cris/arch-v32/mach-fs/Makefile
+++ b/arch/cris/arch-v32/mach-fs/Makefile
@@ -3,7 +3,6 @@
 #
 
 obj-y   := dma.o pinmux.o io.o arbiter.o
-obj-$(CONFIG_CPU_FREQ)   += cpufreq.o
 
 clean:
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e7e55a0..e725ea0 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -591,9 +591,9 @@
 source "drivers/acpi/Kconfig"
 
 if PM
-
-source "arch/ia64/kernel/cpufreq/Kconfig"
-
+menu "CPU Frequency scaling"
+source "drivers/cpufreq/Kconfig"
+endmenu
 endif
 
 endmenu
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index d959c84..20678a9 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -23,7 +23,6 @@
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_PERFMON)		+= perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)	+= cyclone.o
-obj-$(CONFIG_CPU_FREQ)		+= cpufreq/
 obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
diff --git a/arch/ia64/kernel/cpufreq/Kconfig b/arch/ia64/kernel/cpufreq/Kconfig
deleted file mode 100644
index 2d9d527..0000000
--- a/arch/ia64/kernel/cpufreq/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#
-# CPU Frequency scaling
-#
-
-menu "CPU Frequency scaling"
-
-source "drivers/cpufreq/Kconfig"
-
-if CPU_FREQ
-
-comment "CPUFreq processor drivers"
-
-config IA64_ACPI_CPUFREQ
-	tristate "ACPI Processor P-States driver"
-	select CPU_FREQ_TABLE
-	depends on ACPI_PROCESSOR
-	help
-	This driver adds a CPUFreq driver which utilizes the ACPI
-	Processor Performance States.
-
-	For details, take a look at <file:Documentation/cpu-freq/>.
-
-	If in doubt, say N.
-
-endif   # CPU_FREQ
-
-endmenu
-
diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile
deleted file mode 100644
index 4838f2a..0000000
--- a/arch/ia64/kernel/cpufreq/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_IA64_ACPI_CPUFREQ)		+= acpi-cpufreq.o
-
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3a7b395..007a917 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2541,7 +2541,14 @@
 
 endmenu
 
-source "arch/mips/kernel/cpufreq/Kconfig"
+config MIPS_EXTERNAL_TIMER
+	bool
+
+if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
+menu "CPU Power Management"
+source "drivers/cpufreq/Kconfig"
+endmenu
+endif
 
 source "net/Kconfig"
 
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index de75fb5..520a908 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -92,8 +92,6 @@
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)	+= 8250-platform.o
 
-obj-$(CONFIG_MIPS_CPUFREQ)	+= cpufreq/
-
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_mipsxx.o
 
diff --git a/arch/mips/kernel/cpufreq/Kconfig b/arch/mips/kernel/cpufreq/Kconfig
deleted file mode 100644
index 58c601e..0000000
--- a/arch/mips/kernel/cpufreq/Kconfig
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# CPU Frequency scaling
-#
-
-config MIPS_EXTERNAL_TIMER
-	bool
-
-config MIPS_CPUFREQ
-	bool
-	default y
-	depends on CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
-
-if MIPS_CPUFREQ
-
-menu "CPU Frequency scaling"
-
-source "drivers/cpufreq/Kconfig"
-
-if CPU_FREQ
-
-comment "CPUFreq processor drivers"
-
-config LOONGSON2_CPUFREQ
-	tristate "Loongson2 CPUFreq Driver"
-	select CPU_FREQ_TABLE
-	depends on MIPS_CPUFREQ
-	help
-	  This option adds a CPUFreq driver for loongson processors which
-	  support software configurable cpu frequency.
-
-	  Loongson2F and it's successors support this feature.
-
-	  For details, take a look at <file:Documentation/cpu-freq/>.
-
-	  If in doubt, say N.
-
-endif	# CPU_FREQ
-
-endmenu
-
-endif	# MIPS_CPUFREQ
diff --git a/arch/mips/kernel/cpufreq/Makefile b/arch/mips/kernel/cpufreq/Makefile
deleted file mode 100644
index 05a5715..0000000
--- a/arch/mips/kernel/cpufreq/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Linux/MIPS cpufreq.
-#
-
-obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 53aaefe..9978f59 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -113,34 +113,10 @@
 	default m
 	depends on CBE_RAS && SPU_BASE
 
-config CBE_CPUFREQ
-	tristate "CBE frequency scaling"
-	depends on CBE_RAS && CPU_FREQ
-	default m
-	help
-	  This adds the cpufreq driver for Cell BE processors.
-	  For details, take a look at <file:Documentation/cpu-freq/>.
-	  If you don't have such processor, say N
-
-config CBE_CPUFREQ_PMI_ENABLE
-	bool "CBE frequency scaling using PMI interface"
-	depends on CBE_CPUFREQ
-	default n
-	help
-	  Select this, if you want to use the PMI interface
-	  to switch frequencies. Using PMI, the
-	  processor will not only be able to run at lower speed,
-	  but also at lower core voltage.
-
-config CBE_CPUFREQ_PMI
-	tristate
-	depends on CBE_CPUFREQ_PMI_ENABLE
-	default CBE_CPUFREQ
-
 config PPC_PMI
 	tristate
 	default y
-	depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON
+	depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON
 	help
 	  PMI (Platform Management Interrupt) is a way to
 	  communicate with the BMC (Baseboard Management Controller).
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index a4a8935..fe053e7 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -5,9 +5,6 @@
 obj-$(CONFIG_CBE_RAS)			+= ras.o
 
 obj-$(CONFIG_CBE_THERM)			+= cbe_thermal.o
-obj-$(CONFIG_CBE_CPUFREQ_PMI)		+= cbe_cpufreq_pmi.o
-obj-$(CONFIG_CBE_CPUFREQ)		+= cbe-cpufreq.o
-cbe-cpufreq-y				+= cbe_cpufreq_pervasive.o cbe_cpufreq.o
 obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR)	+= cpufreq_spudemand.o
 
 obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON)	+= cbe_powerbutton.o
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 890f30e..be1e795 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -273,10 +273,9 @@
 
 	freqs.old = policy->cur;
 	freqs.new = pas_freqs[pas_astate_new].frequency;
-	freqs.cpu = policy->cpu;
 
 	mutex_lock(&pas_switch_mutex);
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	pr_debug("setting frequency for cpu %d to %d kHz, 1/%d of max frequency\n",
 		 policy->cpu,
@@ -288,7 +287,7 @@
 	for_each_online_cpu(i)
 		set_astate(i, pas_astate_new);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	mutex_unlock(&pas_switch_mutex);
 
 	ppc_proc_freq = freqs.new * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 311b804..3104fad 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -335,7 +335,8 @@
 	return 0;
 }
 
-static int do_set_cpu_speed(int speed_mode, int notify)
+static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode,
+		int notify)
 {
 	struct cpufreq_freqs freqs;
 	unsigned long l3cr;
@@ -343,13 +344,12 @@
 
 	freqs.old = cur_freq;
 	freqs.new = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
-	freqs.cpu = smp_processor_id();
 
 	if (freqs.old == freqs.new)
 		return 0;
 
 	if (notify)
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	if (speed_mode == CPUFREQ_LOW &&
 	    cpu_has_feature(CPU_FTR_L3CR)) {
 		l3cr = _get_L3CR();
@@ -366,7 +366,7 @@
 			_set_L3CR(prev_l3cr);
 	}
 	if (notify)
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	cur_freq = (speed_mode == CPUFREQ_HIGH) ? hi_freq : low_freq;
 
 	return 0;
@@ -393,7 +393,7 @@
 			target_freq, relation, &newstate))
 		return -EINVAL;
 
-	rc = do_set_cpu_speed(newstate, 1);
+	rc = do_set_cpu_speed(policy, newstate, 1);
 
 	ppc_proc_freq = cur_freq * 1000ul;
 	return rc;
@@ -442,7 +442,7 @@
 	no_schedule = 1;
 	sleep_freq = cur_freq;
 	if (cur_freq == low_freq && !is_pmu_based)
-		do_set_cpu_speed(CPUFREQ_HIGH, 0);
+		do_set_cpu_speed(policy, CPUFREQ_HIGH, 0);
 	return 0;
 }
 
@@ -458,7 +458,7 @@
 	 * is that we force a switch to whatever it was, which is
 	 * probably high speed due to our suspend() routine
 	 */
-	do_set_cpu_speed(sleep_freq == low_freq ?
+	do_set_cpu_speed(policy, sleep_freq == low_freq ?
 			 CPUFREQ_LOW : CPUFREQ_HIGH, 0);
 
 	ppc_proc_freq = cur_freq * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 9650c602..7ba4234 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -339,11 +339,10 @@
 
 	freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency;
 	freqs.new = g5_cpu_freqs[newstate].frequency;
-	freqs.cpu = 0;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	rc = g5_switch_freq(newstate);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	mutex_unlock(&g5_switch_mutex);
 
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 4d806b4..4644efa0 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -23,8 +23,8 @@
 #include "pseries.h"
 
 struct cpuidle_driver pseries_idle_driver = {
-	.name =		"pseries_idle",
-	.owner =	THIS_MODULE,
+	.name             = "pseries_idle",
+	.owner            = THIS_MODULE,
 };
 
 #define MAX_IDLE_STATE_COUNT	2
@@ -33,10 +33,8 @@
 static struct cpuidle_device __percpu *pseries_cpuidle_devices;
 static struct cpuidle_state *cpuidle_state_table;
 
-static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
+static inline void idle_loop_prolog(unsigned long *in_purr)
 {
-
-	*kt_before = ktime_get();
 	*in_purr = mfspr(SPRN_PURR);
 	/*
 	 * Indicate to the HV that we are idle. Now would be
@@ -45,12 +43,10 @@
 	get_lppaca()->idle = 1;
 }
 
-static inline  s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
+static inline void idle_loop_epilog(unsigned long in_purr)
 {
 	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
 	get_lppaca()->idle = 0;
-
-	return ktime_to_us(ktime_sub(ktime_get(), kt_before));
 }
 
 static int snooze_loop(struct cpuidle_device *dev,
@@ -58,10 +54,9 @@
 			int index)
 {
 	unsigned long in_purr;
-	ktime_t kt_before;
 	int cpu = dev->cpu;
 
-	idle_loop_prolog(&in_purr, &kt_before);
+	idle_loop_prolog(&in_purr);
 	local_irq_enable();
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
@@ -75,8 +70,8 @@
 	clear_thread_flag(TIF_POLLING_NRFLAG);
 	smp_mb();
 
-	dev->last_residency =
-		(int)idle_loop_epilog(in_purr, kt_before);
+	idle_loop_epilog(in_purr);
+
 	return index;
 }
 
@@ -102,9 +97,8 @@
 				int index)
 {
 	unsigned long in_purr;
-	ktime_t kt_before;
 
-	idle_loop_prolog(&in_purr, &kt_before);
+	idle_loop_prolog(&in_purr);
 	get_lppaca()->donate_dedicated_cpu = 1;
 
 	ppc64_runlatch_off();
@@ -112,8 +106,9 @@
 	check_and_cede_processor();
 
 	get_lppaca()->donate_dedicated_cpu = 0;
-	dev->last_residency =
-		(int)idle_loop_epilog(in_purr, kt_before);
+
+	idle_loop_epilog(in_purr);
+
 	return index;
 }
 
@@ -122,9 +117,8 @@
 			int index)
 {
 	unsigned long in_purr;
-	ktime_t kt_before;
 
-	idle_loop_prolog(&in_purr, &kt_before);
+	idle_loop_prolog(&in_purr);
 
 	/*
 	 * Yield the processor to the hypervisor.  We return if
@@ -135,8 +129,8 @@
 	 */
 	check_and_cede_processor();
 
-	dev->last_residency =
-		(int)idle_loop_epilog(in_purr, kt_before);
+	idle_loop_epilog(in_purr);
+
 	return index;
 }
 
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 1ea597c..78d8ace 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -622,25 +622,7 @@
 endmenu
 
 menu "CPU Frequency scaling"
-
 source "drivers/cpufreq/Kconfig"
-
-config SH_CPU_FREQ
-	tristate "SuperH CPU Frequency driver"
-	depends on CPU_FREQ
-	select CPU_FREQ_TABLE
-	help
-	  This adds the cpufreq driver for SuperH. Any CPU that supports
-	  clock rate rounding through the clock framework can use this
-	  driver. While it will make the kernel slightly larger, this is
-	  harmless for CPUs that don't support rate rounding. The driver
-	  will also generate a notice in the boot log before disabling
-	  itself if the CPU in question is not capable of rate rounding.
-
-	  For details, take a look at <file:Documentation/cpu-freq>.
-
-	  If unsure, say N.
-
 endmenu
 
 source "arch/sh/drivers/Kconfig"
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h
index e14567a..70ae0b2 100644
--- a/arch/sh/include/asm/suspend.h
+++ b/arch/sh/include/asm/suspend.h
@@ -14,9 +14,9 @@
 void sh_mobile_call_standby(unsigned long mode);
 
 #ifdef CONFIG_CPU_IDLE
-void sh_mobile_setup_cpuidle(void);
+int sh_mobile_setup_cpuidle(void);
 #else
-static inline void sh_mobile_setup_cpuidle(void) {}
+static inline int sh_mobile_setup_cpuidle(void) { return 0; }
 #endif
 
 /* notifier chains for pre/post sleep hooks */
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index f259b37..261c8bf 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -31,7 +31,6 @@
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
-obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
 obj-$(CONFIG_MODULES)		+= sh_ksyms_$(BITS).o module.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 1ddc876..d306225 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -51,70 +51,53 @@
 	return k;
 }
 
-static struct cpuidle_device cpuidle_dev;
 static struct cpuidle_driver cpuidle_driver = {
-	.name			= "sh_idle",
-	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
+	.name   = "sh_idle",
+	.owner  = THIS_MODULE,
+	.states = {
+		{
+			.exit_latency = 1,
+			.target_residency = 1 * 2,
+			.power_usage = 3,
+			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.enter = cpuidle_sleep_enter,
+			.name = "C1",
+			.desc = "SuperH Sleep Mode",
+		},
+		{
+			.exit_latency = 100,
+			.target_residency = 1 * 2,
+			.power_usage = 1,
+			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.enter = cpuidle_sleep_enter,
+			.name = "C2",
+			.desc = "SuperH Sleep Mode [SF]",
+			.disabled = true,
+		},
+		{
+			.exit_latency = 2300,
+			.target_residency = 1 * 2,
+			.power_usage = 1,
+			.flags = CPUIDLE_FLAG_TIME_VALID,
+			.enter = cpuidle_sleep_enter,
+			.name = "C3",
+			.desc = "SuperH Mobile Standby Mode [SF]",
+			.disabled = true,
+		},
+	},
+	.safe_state_index = 0,
+	.state_count = 3,
 };
 
-void sh_mobile_setup_cpuidle(void)
+int __init sh_mobile_setup_cpuidle(void)
 {
-	struct cpuidle_device *dev = &cpuidle_dev;
-	struct cpuidle_driver *drv = &cpuidle_driver;
-	struct cpuidle_state *state;
-	int i;
+	int ret;
 
+	if (sh_mobile_sleep_supported & SUSP_SH_SF)
+		cpuidle_driver.states[1].disabled = false;
 
-	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
-		drv->states[i].name[0] = '\0';
-		drv->states[i].desc[0] = '\0';
-	}
+	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
+		cpuidle_driver.states[2].disabled = false;
 
-	i = CPUIDLE_DRIVER_STATE_START;
-
-	state = &drv->states[i++];
-	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
-	strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
-	state->exit_latency = 1;
-	state->target_residency = 1 * 2;
-	state->power_usage = 3;
-	state->flags = 0;
-	state->flags |= CPUIDLE_FLAG_TIME_VALID;
-	state->enter = cpuidle_sleep_enter;
-
-	drv->safe_state_index = i-1;
-
-	if (sh_mobile_sleep_supported & SUSP_SH_SF) {
-		state = &drv->states[i++];
-		snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
-		strncpy(state->desc, "SuperH Sleep Mode [SF]",
-			CPUIDLE_DESC_LEN);
-		state->exit_latency = 100;
-		state->target_residency = 1 * 2;
-		state->power_usage = 1;
-		state->flags = 0;
-		state->flags |= CPUIDLE_FLAG_TIME_VALID;
-		state->enter = cpuidle_sleep_enter;
-	}
-
-	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
-		state = &drv->states[i++];
-		snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
-		strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
-			CPUIDLE_DESC_LEN);
-		state->exit_latency = 2300;
-		state->target_residency = 1 * 2;
-		state->power_usage = 1;
-		state->flags = 0;
-		state->flags |= CPUIDLE_FLAG_TIME_VALID;
-		state->enter = cpuidle_sleep_enter;
-	}
-
-	drv->state_count = i;
-	dev->state_count = i;
-
-	cpuidle_register_driver(&cpuidle_driver);
-
-	cpuidle_register_device(dev);
+	return cpuidle_register(&cpuidle_driver, NULL);
 }
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index 08d27fa..ac37b72 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -150,8 +150,7 @@
 static int __init sh_pm_init(void)
 {
 	suspend_set_ops(&sh_pm_ops);
-	sh_mobile_setup_cpuidle();
-	return 0;
+	return sh_mobile_setup_cpuidle();
 }
 
 late_initcall(sh_pm_init);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 66dc562..e564115 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -254,29 +254,6 @@
 
 if SPARC64
 source "drivers/cpufreq/Kconfig"
-
-config US3_FREQ
-	tristate "UltraSPARC-III CPU Frequency driver"
-	depends on CPU_FREQ
-	select CPU_FREQ_TABLE
-	help
-	  This adds the CPUFreq driver for UltraSPARC-III processors.
-
-	  For details, take a look at <file:Documentation/cpu-freq>.
-
-	  If in doubt, say N.
-
-config US2E_FREQ
-	tristate "UltraSPARC-IIe CPU Frequency driver"
-	depends on CPU_FREQ
-	select CPU_FREQ_TABLE
-	help
-	  This adds the CPUFreq driver for UltraSPARC-IIe processors.
-
-	  For details, take a look at <file:Documentation/cpu-freq>.
-
-	  If in doubt, say N.
-
 endif
 
 config US3_MC
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 6cf591b..5276fd4 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -102,9 +102,6 @@
 
 obj-$(CONFIG_COMPAT)         += sys32.o sys_sparc32.o signal32.o
 
-# sparc64 cpufreq
-obj-$(CONFIG_US3_FREQ)  += us3_cpufreq.o
-obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
 obj-$(CONFIG_US3_MC)    += chmc.o
 
 obj-$(CONFIG_KPROBES)   += kprobes.o
diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile
index fa497e0..607a72f 100644
--- a/arch/unicore32/kernel/Makefile
+++ b/arch/unicore32/kernel/Makefile
@@ -9,7 +9,6 @@
 obj-$(CONFIG_MODULES)		+= ksyms.o module.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
-obj-$(CONFIG_CPU_FREQ)		+= cpu-ucv2.o
 obj-$(CONFIG_UNICORE_FPU_F64)	+= fpu-ucf64.o
 
 # obj-y for architecture PKUnity v3
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 398f7cb..8010ebc 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -185,6 +185,7 @@
 #define X86_FEATURE_PTS		(7*32+ 6) /* Intel Package Thermal Status */
 #define X86_FEATURE_DTHERM	(7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE	(7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  (8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 66b5faf..53a4e27 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -373,7 +373,6 @@
 static struct cpuidle_driver apm_idle_driver = {
 	.name = "apm_idle",
 	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
 	.states = {
 		{ /* entry 0 is for polling */ },
 		{ /* entry 1 is for APM idle */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index ee8e9ab..d92b5da 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -39,8 +39,9 @@
 		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_EPB,		CR_ECX, 3, 0x00000006, 0 },
 		{ X86_FEATURE_XSAVEOPT,		CR_EAX,	0, 0x0000000d, 1 },
-		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007, 0 },
 		{ X86_FEATURE_HW_PSTATE,	CR_EDX, 7, 0x80000007, 0 },
+		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007, 0 },
+		{ X86_FEATURE_PROC_FEEDBACK,	CR_EDX,11, 0x80000007, 0 },
 		{ X86_FEATURE_NPT,		CR_EDX, 0, 0x8000000a, 0 },
 		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a, 0 },
 		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a, 0 },
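
The new scattered entry simply mirrors CPUID leaf 0x80000007 EDX bit 11 into
the kernel's synthetic feature word, so later code can use the ordinary
feature-test helpers instead of issuing CPUID itself.  A hedged sketch of the
consuming side (demo_ name made up):

	#include <linux/types.h>
	#include <asm/cpufeature.h>

	static bool demo_has_proc_feedback(void)
	{
		/* set by the scattered-feature table during CPU identification */
		return boot_cpu_has(X86_FEATURE_PROC_FEEDBACK);
	}
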
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 4bf68c8..100bd72 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -298,14 +298,6 @@
 	  Documentation/kernel-parameters.txt to control the type and
 	  amount of debug output.
 
-config ACPI_DEBUG_FUNC_TRACE
-	bool "Additionally enable ACPI function tracing"
-	default n
-	depends on ACPI_DEBUG
-	help
-	  ACPI Debug Statements slow down ACPI processing. Function trace
-	  is about half of the penalty and is rarely useful.
-
 config ACPI_PCI_SLOT
 	bool "PCI slot detection driver"
 	depends on SYSFS
@@ -334,7 +326,7 @@
 
 config ACPI_CONTAINER
 	bool "Container and Module Devices"
-	default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU || ACPI_HOTPLUG_IO)
+	default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
 	help
 	  This driver supports ACPI Container and Module devices (IDs
 	  ACPI0004, PNP0A05, and PNP0A06).
@@ -345,9 +337,8 @@
 	  the module will be called container.
 
 config ACPI_HOTPLUG_MEMORY
-	tristate "Memory Hotplug"
+	bool "Memory Hotplug"
 	depends on MEMORY_HOTPLUG
-	default n
 	help
 	  This driver supports ACPI memory hotplug.  The driver
 	  fields notifications on ACPI memory devices (PNP0C80),
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 474fcfeb..ecb743b 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -39,6 +39,7 @@
 acpi-$(CONFIG_ACPI_DOCK)	+= dock.o
 acpi-y				+= pci_root.o pci_link.o pci_irq.o
 acpi-y				+= csrt.o
+acpi-$(CONFIG_X86_INTEL_LPSS)	+= acpi_lpss.o
 acpi-y				+= acpi_platform.o
 acpi-y				+= power.o
 acpi-y				+= event.o
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
new file mode 100644
index 0000000..b1c9542
--- /dev/null
+++ b/drivers/acpi/acpi_lpss.c
@@ -0,0 +1,292 @@
+/*
+ * ACPI support for Intel Lynxpoint LPSS.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/clk-lpss.h>
+#include <linux/pm_runtime.h>
+
+#include "internal.h"
+
+ACPI_MODULE_NAME("acpi_lpss");
+
+#define LPSS_CLK_SIZE	0x04
+#define LPSS_LTR_SIZE	0x18
+
+/* Offsets relative to LPSS_PRIVATE_OFFSET */
+#define LPSS_GENERAL			0x08
+#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
+#define LPSS_SW_LTR			0x10
+#define LPSS_AUTO_LTR			0x14
+
+struct lpss_device_desc {
+	bool clk_required;
+	const char *clk_parent;
+	bool ltr_required;
+	unsigned int prv_offset;
+};
+
+struct lpss_private_data {
+	void __iomem *mmio_base;
+	resource_size_t mmio_size;
+	struct clk *clk;
+	const struct lpss_device_desc *dev_desc;
+};
+
+static struct lpss_device_desc lpt_dev_desc = {
+	.clk_required = true,
+	.clk_parent = "lpss_clk",
+	.prv_offset = 0x800,
+	.ltr_required = true,
+};
+
+static struct lpss_device_desc lpt_sdio_dev_desc = {
+	.prv_offset = 0x1000,
+	.ltr_required = true,
+};
+
+static const struct acpi_device_id acpi_lpss_device_ids[] = {
+	/* Lynxpoint LPSS devices */
+	{ "INT33C0", (unsigned long)&lpt_dev_desc },
+	{ "INT33C1", (unsigned long)&lpt_dev_desc },
+	{ "INT33C2", (unsigned long)&lpt_dev_desc },
+	{ "INT33C3", (unsigned long)&lpt_dev_desc },
+	{ "INT33C4", (unsigned long)&lpt_dev_desc },
+	{ "INT33C5", (unsigned long)&lpt_dev_desc },
+	{ "INT33C6", (unsigned long)&lpt_sdio_dev_desc },
+	{ "INT33C7", },
+
+	{ }
+};
+
+static int is_memory(struct acpi_resource *res, void *not_used)
+{
+	struct resource r;
+	return !acpi_dev_resource_memory(res, &r);
+}
+
+/* LPSS main clock device. */
+static struct platform_device *lpss_clk_dev;
+
+static inline void lpt_register_clock_device(void)
+{
+	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
+}
+
+static int register_device_clock(struct acpi_device *adev,
+				 struct lpss_private_data *pdata)
+{
+	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
+
+	if (!lpss_clk_dev)
+		lpt_register_clock_device();
+
+	if (!dev_desc->clk_parent || !pdata->mmio_base
+	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
+		return -ENODATA;
+
+	pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
+				       dev_desc->clk_parent, 0,
+				       pdata->mmio_base + dev_desc->prv_offset,
+				       0, 0, NULL);
+	if (IS_ERR(pdata->clk))
+		return PTR_ERR(pdata->clk);
+
+	clk_register_clkdev(pdata->clk, NULL, dev_name(&adev->dev));
+	return 0;
+}
+
+static int acpi_lpss_create_device(struct acpi_device *adev,
+				   const struct acpi_device_id *id)
+{
+	struct lpss_device_desc *dev_desc;
+	struct lpss_private_data *pdata;
+	struct resource_list_entry *rentry;
+	struct list_head resource_list;
+	int ret;
+
+	dev_desc = (struct lpss_device_desc *)id->driver_data;
+	if (!dev_desc)
+		return acpi_create_platform_device(adev, id);
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&resource_list);
+	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
+	if (ret < 0)
+		goto err_out;
+
+	list_for_each_entry(rentry, &resource_list, node)
+		if (resource_type(&rentry->res) == IORESOURCE_MEM) {
+			pdata->mmio_size = resource_size(&rentry->res);
+			pdata->mmio_base = ioremap(rentry->res.start,
+						   pdata->mmio_size);
+			pdata->dev_desc = dev_desc;
+			break;
+		}
+
+	acpi_dev_free_resource_list(&resource_list);
+
+	if (dev_desc->clk_required) {
+		ret = register_device_clock(adev, pdata);
+		if (ret) {
+			/*
+			 * Skip the device, but don't terminate the namespace
+			 * scan.
+			 */
+			kfree(pdata);
+			return 0;
+		}
+	}
+
+	adev->driver_data = pdata;
+	ret = acpi_create_platform_device(adev, id);
+	if (ret > 0)
+		return ret;
+
+	adev->driver_data = NULL;
+
+ err_out:
+	kfree(pdata);
+	return ret;
+}
+
+static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
+{
+	struct acpi_device *adev;
+	struct lpss_private_data *pdata;
+	unsigned long flags;
+	int ret;
+
+	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
+	if (WARN_ON(ret))
+		return ret;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+	if (pm_runtime_suspended(dev)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	pdata = acpi_driver_data(adev);
+	if (WARN_ON(!pdata || !pdata->mmio_base)) {
+		ret = -ENODEV;
+		goto out;
+	}
+	*val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
+
+ out:
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+	return ret;
+}
+
+static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	u32 ltr_value = 0;
+	unsigned int reg;
+	int ret;
+
+	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
+	ret = lpss_reg_read(dev, reg, &ltr_value);
+	if (ret)
+		return ret;
+
+	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
+}
+
+static ssize_t lpss_ltr_mode_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	u32 ltr_mode = 0;
+	char *outstr;
+	int ret;
+
+	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
+	if (ret)
+		return ret;
+
+	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
+	return sprintf(buf, "%s\n", outstr);
+}
+
+static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
+static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
+static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
+
+static struct attribute *lpss_attrs[] = {
+	&dev_attr_auto_ltr.attr,
+	&dev_attr_sw_ltr.attr,
+	&dev_attr_ltr_mode.attr,
+	NULL,
+};
+
+static struct attribute_group lpss_attr_group = {
+	.attrs = lpss_attrs,
+	.name = "lpss_ltr",
+};
+
+static int acpi_lpss_platform_notify(struct notifier_block *nb,
+				     unsigned long action, void *data)
+{
+	struct platform_device *pdev = to_platform_device(data);
+	struct lpss_private_data *pdata;
+	struct acpi_device *adev;
+	const struct acpi_device_id *id;
+	int ret = 0;
+
+	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
+	if (!id || !id->driver_data)
+		return 0;
+
+	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
+		return 0;
+
+	pdata = acpi_driver_data(adev);
+	if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
+		return 0;
+
+	if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
+		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
+		return 0;
+	}
+
+	if (action == BUS_NOTIFY_ADD_DEVICE)
+		ret = sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group);
+	else if (action == BUS_NOTIFY_DEL_DEVICE)
+		sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
+
+	return ret;
+}
+
+static struct notifier_block acpi_lpss_nb = {
+	.notifier_call = acpi_lpss_platform_notify,
+};
+
+static struct acpi_scan_handler lpss_handler = {
+	.ids = acpi_lpss_device_ids,
+	.attach = acpi_lpss_create_device,
+};
+
+void __init acpi_lpss_init(void)
+{
+	if (!lpt_clk_init()) {
+		bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
+		acpi_scan_add_handler(&lpss_handler);
+	}
+}
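
The lpss_ltr attribute group created above is ordinary sysfs, so the LTR registers can be inspected from user space once a Lynxpoint LPSS device is bound. Below is a minimal read-side sketch; the device directory is a hypothetical placeholder, the attributes are mode 0400 (S_IRUSR) so the reader is assumed to run as root, and a read while the device is runtime-suspended fails with EAGAIN, mirroring the -EAGAIN return from lpss_reg_read() above.

/* Sketch: read the lpss_ltr attributes of one (hypothetical) LPSS device. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void read_attr(const char *path)
{
	char buf[64];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror(path);
		return;
	}
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0 && errno == EAGAIN) {
		printf("%s: device runtime-suspended, try again later\n", path);
	} else if (n < 0) {
		perror(path);
	} else {
		buf[n] = '\0';
		printf("%s: %s", path, buf);
	}
	close(fd);
}

int main(void)
{
	/* Placeholder path; substitute the real LPSS device directory. */
	static const char base[] = "/sys/devices/platform/INT33C2:00/lpss_ltr";
	static const char *names[] = { "ltr_mode", "auto_ltr", "sw_ltr" };
	char path[128];
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		snprintf(path, sizeof(path), "%s/%s", base, names[i]);
		read_attr(path);
	}
	return 0;
}
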
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index da1f82b4..5e6301e 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -1,5 +1,7 @@
 /*
- * Copyright (C) 2004 Intel Corporation <naveen.b.s@intel.com>
+ * Copyright (C) 2004, 2013 Intel Corporation
+ * Author: Naveen B S <naveen.b.s@intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
  * All rights reserved.
  *
@@ -25,14 +27,10 @@
  * ranges.
  */
 
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/memory_hotplug.h>
-#include <linux/slab.h>
 #include <linux/acpi.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/memory_hotplug.h>
+
+#include "internal.h"
 
 #define ACPI_MEMORY_DEVICE_CLASS		"memory"
 #define ACPI_MEMORY_DEVICE_HID			"PNP0C80"
@@ -44,32 +42,28 @@
 #define 	PREFIX		"ACPI:memory_hp:"
 
 ACPI_MODULE_NAME("acpi_memhotplug");
-MODULE_AUTHOR("Naveen B S <naveen.b.s@intel.com>");
-MODULE_DESCRIPTION("Hotplug Mem Driver");
-MODULE_LICENSE("GPL");
 
 /* Memory Device States */
 #define MEMORY_INVALID_STATE	0
 #define MEMORY_POWER_ON_STATE	1
 #define MEMORY_POWER_OFF_STATE	2
 
-static int acpi_memory_device_add(struct acpi_device *device);
-static int acpi_memory_device_remove(struct acpi_device *device);
+static int acpi_memory_device_add(struct acpi_device *device,
+				  const struct acpi_device_id *not_used);
+static void acpi_memory_device_remove(struct acpi_device *device);
 
 static const struct acpi_device_id memory_device_ids[] = {
 	{ACPI_MEMORY_DEVICE_HID, 0},
 	{"", 0},
 };
-MODULE_DEVICE_TABLE(acpi, memory_device_ids);
 
-static struct acpi_driver acpi_memory_device_driver = {
-	.name = "acpi_memhotplug",
-	.class = ACPI_MEMORY_DEVICE_CLASS,
+static struct acpi_scan_handler memory_device_handler = {
 	.ids = memory_device_ids,
-	.ops = {
-		.add = acpi_memory_device_add,
-		.remove = acpi_memory_device_remove,
-		},
+	.attach = acpi_memory_device_add,
+	.detach = acpi_memory_device_remove,
+	.hotplug = {
+		.enabled = true,
+	},
 };
 
 struct acpi_memory_info {
@@ -79,7 +73,6 @@
 	unsigned short caching;	/* memory cache attribute */
 	unsigned short write_protect;	/* memory read/write attribute */
 	unsigned int enabled:1;
-	unsigned int failed:1;
 };
 
 struct acpi_memory_device {
@@ -153,48 +146,6 @@
 	return 0;
 }
 
-static int acpi_memory_get_device(acpi_handle handle,
-				  struct acpi_memory_device **mem_device)
-{
-	struct acpi_device *device = NULL;
-	int result = 0;
-
-	acpi_scan_lock_acquire();
-
-	acpi_bus_get_device(handle, &device);
-	if (device)
-		goto end;
-
-	/*
-	 * Now add the notified device.  This creates the acpi_device
-	 * and invokes .add function
-	 */
-	result = acpi_bus_scan(handle);
-	if (result) {
-		acpi_handle_warn(handle, "ACPI namespace scan failed\n");
-		result = -EINVAL;
-		goto out;
-	}
-	result = acpi_bus_get_device(handle, &device);
-	if (result) {
-		acpi_handle_warn(handle, "Missing device object\n");
-		result = -EINVAL;
-		goto out;
-	}
-
- end:
-	*mem_device = acpi_driver_data(device);
-	if (!(*mem_device)) {
-		dev_err(&device->dev, "driver data not found\n");
-		result = -ENODEV;
-		goto out;
-	}
-
- out:
-	acpi_scan_lock_release();
-	return result;
-}
-
 static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
 {
 	unsigned long long current_status;
@@ -249,13 +200,11 @@
 		 * returns -EEXIST. If add_memory() returns the other error, it
 		 * means that this memory block is not used by the kernel.
 		 */
-		if (result && result != -EEXIST) {
-			info->failed = 1;
+		if (result && result != -EEXIST)
 			continue;
-		}
 
-		if (!result)
-			info->enabled = 1;
+		info->enabled = 1;
+
 		/*
 		 * Add num_enable even if add_memory() returns -EEXIST, so the
 		 * device is bound to this driver.
@@ -286,16 +235,8 @@
 	nid = acpi_get_node(mem_device->device->handle);
 
 	list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
-		if (info->failed)
-			/* The kernel does not use this memory block */
-			continue;
-
 		if (!info->enabled)
-			/*
-			 * The kernel uses this memory block, but it may be not
-			 * managed by us.
-			 */
-			return -EBUSY;
+			continue;
 
 		if (nid < 0)
 			nid = memory_add_physaddr_to_nid(info->start_addr);
@@ -310,95 +251,21 @@
 	return result;
 }
 
-static void acpi_memory_device_notify(acpi_handle handle, u32 event, void *data)
-{
-	struct acpi_memory_device *mem_device;
-	struct acpi_device *device;
-	struct acpi_eject_event *ej_event = NULL;
-	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-	acpi_status status;
-
-	switch (event) {
-	case ACPI_NOTIFY_BUS_CHECK:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "\nReceived BUS CHECK notification for device\n"));
-		/* Fall Through */
-	case ACPI_NOTIFY_DEVICE_CHECK:
-		if (event == ACPI_NOTIFY_DEVICE_CHECK)
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "\nReceived DEVICE CHECK notification for device\n"));
-		if (acpi_memory_get_device(handle, &mem_device)) {
-			acpi_handle_err(handle, "Cannot find driver data\n");
-			break;
-		}
-
-		ost_code = ACPI_OST_SC_SUCCESS;
-		break;
-
-	case ACPI_NOTIFY_EJECT_REQUEST:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "\nReceived EJECT REQUEST notification for device\n"));
-
-		status = AE_ERROR;
-		acpi_scan_lock_acquire();
-
-		if (acpi_bus_get_device(handle, &device)) {
-			acpi_handle_err(handle, "Device doesn't exist\n");
-			goto unlock;
-		}
-		mem_device = acpi_driver_data(device);
-		if (!mem_device) {
-			acpi_handle_err(handle, "Driver Data is NULL\n");
-			goto unlock;
-		}
-
-		ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
-		if (!ej_event) {
-			pr_err(PREFIX "No memory, dropping EJECT\n");
-			goto unlock;
-		}
-
-		get_device(&device->dev);
-		ej_event->device = device;
-		ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
-		/* The eject is carried out asynchronously. */
-		status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device,
-						 ej_event);
-		if (ACPI_FAILURE(status)) {
-			put_device(&device->dev);
-			kfree(ej_event);
-		}
-
- unlock:
-		acpi_scan_lock_release();
-		if (ACPI_SUCCESS(status))
-			return;
-	default:
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Unsupported event [0x%x]\n", event));
-
-		/* non-hotplug event; possibly handled by other handler */
-		return;
-	}
-
-	/* Inform firmware that the hotplug operation has completed */
-	(void) acpi_evaluate_hotplug_ost(handle, event, ost_code, NULL);
-}
-
 static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
 {
 	if (!mem_device)
 		return;
 
 	acpi_memory_free_device_resources(mem_device);
+	mem_device->device->driver_data = NULL;
 	kfree(mem_device);
 }
 
-static int acpi_memory_device_add(struct acpi_device *device)
+static int acpi_memory_device_add(struct acpi_device *device,
+				  const struct acpi_device_id *not_used)
 {
+	struct acpi_memory_device *mem_device;
 	int result;
-	struct acpi_memory_device *mem_device = NULL;
-
 
 	if (!device)
 		return -EINVAL;
@@ -423,147 +290,36 @@
 	/* Set the device state */
 	mem_device->state = MEMORY_POWER_ON_STATE;
 
-	pr_debug("%s\n", acpi_device_name(device));
-
-	if (!acpi_memory_check_device(mem_device)) {
-		/* call add_memory func */
-		result = acpi_memory_enable_device(mem_device);
-		if (result) {
-			dev_err(&device->dev,
-				"Error in acpi_memory_enable_device\n");
-			acpi_memory_device_free(mem_device);
-		}
+	result = acpi_memory_check_device(mem_device);
+	if (result) {
+		acpi_memory_device_free(mem_device);
+		return 0;
 	}
-	return result;
+
+	result = acpi_memory_enable_device(mem_device);
+	if (result) {
+		dev_err(&device->dev, "acpi_memory_enable_device() error\n");
+		acpi_memory_device_free(mem_device);
+		return -ENODEV;
+	}
+
+	dev_dbg(&device->dev, "Memory device configured by ACPI\n");
+	return 1;
 }
 
-static int acpi_memory_device_remove(struct acpi_device *device)
+static void acpi_memory_device_remove(struct acpi_device *device)
 {
-	struct acpi_memory_device *mem_device = NULL;
-	int result;
+	struct acpi_memory_device *mem_device;
 
 	if (!device || !acpi_driver_data(device))
-		return -EINVAL;
+		return;
 
 	mem_device = acpi_driver_data(device);
-
-	result = acpi_memory_remove_memory(mem_device);
-	if (result)
-		return result;
-
+	acpi_memory_remove_memory(mem_device);
 	acpi_memory_device_free(mem_device);
-
-	return 0;
 }
 
-/*
- * Helper function to check for memory device
- */
-static acpi_status is_memory_device(acpi_handle handle)
+void __init acpi_memory_hotplug_init(void)
 {
-	char *hardware_id;
-	acpi_status status;
-	struct acpi_device_info *info;
-
-	status = acpi_get_object_info(handle, &info);
-	if (ACPI_FAILURE(status))
-		return status;
-
-	if (!(info->valid & ACPI_VALID_HID)) {
-		kfree(info);
-		return AE_ERROR;
-	}
-
-	hardware_id = info->hardware_id.string;
-	if ((hardware_id == NULL) ||
-	    (strcmp(hardware_id, ACPI_MEMORY_DEVICE_HID)))
-		status = AE_ERROR;
-
-	kfree(info);
-	return status;
+	acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
 }
-
-static acpi_status
-acpi_memory_register_notify_handler(acpi_handle handle,
-				    u32 level, void *ctxt, void **retv)
-{
-	acpi_status status;
-
-
-	status = is_memory_device(handle);
-	if (ACPI_FAILURE(status))
-		return AE_OK;	/* continue */
-
-	status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-					     acpi_memory_device_notify, NULL);
-	/* continue */
-	return AE_OK;
-}
-
-static acpi_status
-acpi_memory_deregister_notify_handler(acpi_handle handle,
-				      u32 level, void *ctxt, void **retv)
-{
-	acpi_status status;
-
-
-	status = is_memory_device(handle);
-	if (ACPI_FAILURE(status))
-		return AE_OK;	/* continue */
-
-	status = acpi_remove_notify_handler(handle,
-					    ACPI_SYSTEM_NOTIFY,
-					    acpi_memory_device_notify);
-
-	return AE_OK;	/* continue */
-}
-
-static int __init acpi_memory_device_init(void)
-{
-	int result;
-	acpi_status status;
-
-
-	result = acpi_bus_register_driver(&acpi_memory_device_driver);
-
-	if (result < 0)
-		return -ENODEV;
-
-	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-				     ACPI_UINT32_MAX,
-				     acpi_memory_register_notify_handler, NULL,
-				     NULL, NULL);
-
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
-		acpi_bus_unregister_driver(&acpi_memory_device_driver);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void __exit acpi_memory_device_exit(void)
-{
-	acpi_status status;
-
-
-	/*
-	 * Adding this to un-install notification handlers for all the device
-	 * handles.
-	 */
-	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
-				     ACPI_UINT32_MAX,
-				     acpi_memory_deregister_notify_handler, NULL,
-				     NULL, NULL);
-
-	if (ACPI_FAILURE(status))
-		ACPI_EXCEPTION((AE_INFO, status, "walk_namespace failed"));
-
-	acpi_bus_unregister_driver(&acpi_memory_device_driver);
-
-	return;
-}
-
-module_init(acpi_memory_device_init);
-module_exit(acpi_memory_device_exit);
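
Both this conversion and the LPSS handler above follow the acpi_scan_handler .attach convention visible in the code: a positive return claims the device for the handler, 0 declines it (the LPSS code deliberately returns 0 on clock-registration failure so the rest of the namespace scan is unaffected), and a negative value reports a hard error. A bare-bones skeleton of that shape, where the example_* helpers and the "TEST0001" HID are placeholders and the includes mirror the ones this patch uses:

/* Minimal ACPI scan-handler sketch; example_* helpers and the HID are placeholders. */
#include <linux/acpi.h>

#include "internal.h"	/* acpi_scan_add_handler() is declared here */

static const struct acpi_device_id example_device_ids[] = {
	{"TEST0001", 0},	/* placeholder HID */
	{"", 0},
};

static int example_attach(struct acpi_device *adev,
			  const struct acpi_device_id *not_used)
{
	if (!example_device_present(adev))	/* placeholder check */
		return 0;	/* not ours: nothing bound, scan goes on */

	if (example_setup(adev))		/* placeholder setup */
		return -ENODEV;	/* hard error */

	return 1;		/* device is now handled by this handler */
}

static void example_detach(struct acpi_device *adev)
{
	example_teardown(adev);			/* placeholder teardown */
}

static struct acpi_scan_handler example_handler = {
	.ids = example_device_ids,
	.attach = example_attach,
	.detach = example_detach,
};

void __init example_init(void)
{
	acpi_scan_add_handler(&example_handler);
}
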
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 31de104..27bb6a9 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -236,7 +236,7 @@
 	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
 		(void *)(unsigned long)ps_tsk_num,
 		"acpi_pad/%d", ps_tsk_num);
-	rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
+	rc = PTR_RET(ps_tsks[ps_tsk_num]);
 	if (!rc)
 		ps_tsk_num++;
 	else
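
PTR_RET() is simply shorthand for the open-coded test removed above: it collapses an ERR_PTR-style pointer into either its error code or 0. Roughly equivalent to:

/* Equivalent of the replaced line (see include/linux/err.h). */
#include <linux/err.h>

static inline int example_ptr_ret(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
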
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 26fce4b..fafec5d 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -22,9 +22,6 @@
 
 ACPI_MODULE_NAME("platform");
 
-/* Flags for acpi_create_platform_device */
-#define ACPI_PLATFORM_CLK	BIT(0)
-
 /*
  * The following ACPI IDs are known to be suitable for representing as
  * platform devices.
@@ -33,33 +30,9 @@
 
 	{ "PNP0D40" },
 
-	/* Haswell LPSS devices */
-	{ "INT33C0", ACPI_PLATFORM_CLK },
-	{ "INT33C1", ACPI_PLATFORM_CLK },
-	{ "INT33C2", ACPI_PLATFORM_CLK },
-	{ "INT33C3", ACPI_PLATFORM_CLK },
-	{ "INT33C4", ACPI_PLATFORM_CLK },
-	{ "INT33C5", ACPI_PLATFORM_CLK },
-	{ "INT33C6", ACPI_PLATFORM_CLK },
-	{ "INT33C7", ACPI_PLATFORM_CLK },
-
 	{ }
 };
 
-static int acpi_create_platform_clks(struct acpi_device *adev)
-{
-	static struct platform_device *pdev;
-
-	/* Create Lynxpoint LPSS clocks */
-	if (!pdev && !strncmp(acpi_device_hid(adev), "INT33C", 6)) {
-		pdev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
-		if (IS_ERR(pdev))
-			return PTR_ERR(pdev);
-	}
-
-	return 0;
-}
-
 /**
  * acpi_create_platform_device - Create platform device for ACPI device node
  * @adev: ACPI device node to create a platform device for.
@@ -71,10 +44,9 @@
  *
  * Name of the platform device will be the same as @adev's.
  */
-static int acpi_create_platform_device(struct acpi_device *adev,
-				       const struct acpi_device_id *id)
+int acpi_create_platform_device(struct acpi_device *adev,
+				const struct acpi_device_id *id)
 {
-	unsigned long flags = id->driver_data;
 	struct platform_device *pdev = NULL;
 	struct acpi_device *acpi_parent;
 	struct platform_device_info pdevinfo;
@@ -83,14 +55,6 @@
 	struct resource *resources;
 	int count;
 
-	if (flags & ACPI_PLATFORM_CLK) {
-		int ret = acpi_create_platform_clks(adev);
-		if (ret) {
-			dev_err(&adev->dev, "failed to create clocks\n");
-			return ret;
-		}
-	}
-
 	/* If the ACPI node already has a physical device attached, skip it. */
 	if (adev->physical_node_count)
 		return 0;
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index a1b9bf5..7ddf29e 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -83,6 +83,7 @@
 acpi-y +=		\
 	nsaccess.o	\
 	nsalloc.o	\
+	nsconvert.o	\
 	nsdump.o	\
 	nseval.o	\
 	nsinit.o	\
@@ -160,6 +161,7 @@
 	utobject.o	\
 	utosi.o		\
 	utownerid.o	\
+	utpredef.o	\
 	utresrc.o	\
 	utstate.o	\
 	utstring.o	\
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index ecb4992..0716092 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -224,6 +224,7 @@
  */
 ACPI_EXTERN acpi_spinlock acpi_gbl_gpe_lock;	/* For GPE data structs and registers */
 ACPI_EXTERN acpi_spinlock acpi_gbl_hardware_lock;	/* For ACPI H/W except GPE registers */
+ACPI_EXTERN acpi_spinlock acpi_gbl_reference_count_lock;
 
 /* Mutex for _OSI support */
 
@@ -413,10 +414,12 @@
 
 #ifdef ACPI_DISASSEMBLER
 
-u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_ignore_noop_operator, FALSE);
 
 ACPI_EXTERN u8 acpi_gbl_db_opt_disasm;
 ACPI_EXTERN u8 acpi_gbl_db_opt_verbose;
+ACPI_EXTERN u8 acpi_gbl_num_external_methods;
+ACPI_EXTERN u32 acpi_gbl_resolved_external_methods;
 ACPI_EXTERN struct acpi_external_list *acpi_gbl_external_list;
 ACPI_EXTERN struct acpi_external_file *acpi_gbl_external_file_list;
 #endif
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 805f419..d5bfbd3 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -294,6 +294,8 @@
 #define ACPI_BTYPE_OBJECTS_AND_REFS     0x0001FFFF	/* ARG or LOCAL */
 #define ACPI_BTYPE_ALL_OBJECTS          0x0000FFFF
 
+#pragma pack(1)
+
 /*
  * Information structure for ACPI predefined names.
  * Each entry in the table contains the following items:
@@ -304,7 +306,7 @@
  */
 struct acpi_name_info {
 	char name[ACPI_NAME_SIZE];
-	u8 param_count;
+	u16 argument_list;
 	u8 expected_btypes;
 };
 
@@ -327,7 +329,7 @@
 	u8 count1;
 	u8 object_type2;
 	u8 count2;
-	u8 reserved;
+	u16 reserved;
 };
 
 /* Used for ACPI_PTYPE2_FIXED */
@@ -336,6 +338,7 @@
 	u8 type;
 	u8 count;
 	u8 object_type[4];
+	u8 reserved;
 };
 
 /* Used for ACPI_PTYPE1_OPTION */
@@ -345,7 +348,7 @@
 	u8 count;
 	u8 object_type[2];
 	u8 tail_object_type;
-	u8 reserved;
+	u16 reserved;
 };
 
 union acpi_predefined_info {
@@ -355,6 +358,10 @@
 	struct acpi_package_info3 ret_info3;
 };
 
+/* Reset to default packing */
+
+#pragma pack()
+
 /* Data block used during object validation */
 
 struct acpi_predefined_data {
@@ -363,6 +370,7 @@
 	union acpi_operand_object *parent_package;
 	struct acpi_namespace_node *node;
 	u32 flags;
+	u32 return_btype;
 	u8 node_flags;
 };
 
@@ -371,6 +379,20 @@
 #define ACPI_OBJECT_REPAIRED    1
 #define ACPI_OBJECT_WRAPPED     2
 
+/* Return object auto-repair info */
+
+typedef acpi_status(*acpi_object_converter) (union acpi_operand_object
+					     *original_object,
+					     union acpi_operand_object
+					     **converted_object);
+
+struct acpi_simple_repair_info {
+	char name[ACPI_NAME_SIZE];
+	u32 unexpected_btypes;
+	u32 package_index;
+	acpi_object_converter object_converter;
+};
+
 /*
  * Bitmapped return value types
  * Note: the actual data types must be contiguous, a loop in nspredef.c
@@ -1037,6 +1059,7 @@
 	u16 length;
 	u8 type;
 	u8 flags;
+	u8 resolved;
 };
 
 /* Values for Flags field above */
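
The new 16-bit argument_list field (replacing param_count in struct acpi_name_info above) packs the required argument count into bits 0-2 and one 3-bit ACPI object type per argument into the bits above it; the METHOD_*ARGS and METHOD_GET_* macros added to acpredef.h further down generate and consume that layout. A stand-alone decode sketch, assuming ACPI_TYPE_INTEGER is 0x01 as in ACPICA's actypes.h:

/* Decode a packed argument_list the same way METHOD_GET_COUNT() and
 * METHOD_GET_NEXT_ARG() do; the constants mirror the acpredef.h macros below. */
#include <stdio.h>

#define METHOD_ARG_MASK		0x0007
#define METHOD_ARG_BIT_WIDTH	3
#define ACPI_TYPE_INTEGER	0x01	/* assumed value, from actypes.h */
#define METHOD_2ARGS(a1, a2)	(2 | ((a1) << 3) | ((a2) << 6))

int main(void)
{
	unsigned int arg_list = METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER);
	unsigned int count = arg_list & METHOD_ARG_MASK;	/* METHOD_GET_COUNT() */
	unsigned int i;

	printf("argument count: %u\n", count);			/* 2, e.g. for _PTP */
	for (i = 0; i < count; i++) {
		arg_list >>= METHOD_ARG_BIT_WIDTH;		/* METHOD_GET_NEXT_ARG() */
		printf("arg%u object type: 0x%x\n", i, arg_list & METHOD_ARG_MASK);
	}
	return 0;
}
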
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index ed7943b..53666bd 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -322,10 +322,12 @@
  * where a pointer to an object of type union acpi_operand_object can also
  * appear. This macro is used to distinguish them.
  *
- * The "Descriptor" field is the first field in both structures.
+ * The "DescriptorType" field is the second field in both structures.
  */
+#define ACPI_GET_DESCRIPTOR_PTR(d)      (((union acpi_descriptor *)(void *)(d))->common.common_pointer)
+#define ACPI_SET_DESCRIPTOR_PTR(d, p)   (((union acpi_descriptor *)(void *)(d))->common.common_pointer = (p))
 #define ACPI_GET_DESCRIPTOR_TYPE(d)     (((union acpi_descriptor *)(void *)(d))->common.descriptor_type)
-#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = t)
+#define ACPI_SET_DESCRIPTOR_TYPE(d, t)  (((union acpi_descriptor *)(void *)(d))->common.descriptor_type = (t))
 
 /*
  * Macros for the master AML opcode table
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 02cd548..d2e4918 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -167,6 +167,29 @@
 int acpi_ns_compare_names(char *name1, char *name2);
 
 /*
+ * nsconvert - Dynamic object conversion routines
+ */
+acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+			   union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+			  union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+			  union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
+			   union acpi_operand_object **return_object);
+
+acpi_status
+acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
+			    union acpi_operand_object **return_object);
+
+/*
  * nsdump - Namespace dump/print utilities
  */
 #ifdef	ACPI_FUTURE_USAGE
@@ -208,10 +231,6 @@
 			       acpi_status return_status,
 			       union acpi_operand_object **return_object);
 
-const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
-								    acpi_namespace_node
-								    *node);
-
 void
 acpi_ns_check_parameter_count(char *pathname,
 			      struct acpi_namespace_node *node,
@@ -289,7 +308,7 @@
  * predefined methods/objects
  */
 acpi_status
-acpi_ns_repair_object(struct acpi_predefined_data *data,
+acpi_ns_simple_repair(struct acpi_predefined_data *data,
 		      u32 expected_btypes,
 		      u32 package_index,
 		      union acpi_operand_object **return_object_ptr);
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 752cc40..b22b709 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -56,7 +56,7 @@
  *      object type
  *      count
  *
- * ACPI_PTYPE1_VAR: Variable-length length:
+ * ACPI_PTYPE1_VAR: Variable-length length. Zero-length package is allowed:
  *      object type (Int/Buf/Ref)
  *
  * ACPI_PTYPE1_OPTION: Package has some required and some optional elements
@@ -66,14 +66,16 @@
  * 2) PTYPE2 packages contain a Variable-length number of sub-packages. Each
  *    of the different types describe the contents of each of the sub-packages.
  *
- * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types:
+ * ACPI_PTYPE2: Each subpackage contains 1 or 2 object types. Zero-length
+ *      parent package is allowed:
  *      object type
  *      count
  *      object type
  *      count
  *      (Used for _ALR,_MLS,_PSS,_TRT,_TSS)
  *
- * ACPI_PTYPE2_COUNT: Each subpackage has a count as first element:
+ * ACPI_PTYPE2_COUNT: Each subpackage has a count as first element.
+ *      Zero-length parent package is allowed:
  *      object type
  *      (Used for _CSD,_PSD,_TSD)
  *
@@ -84,17 +86,19 @@
  *      count
  *      (Used for _CST)
  *
- * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length
+ * ACPI_PTYPE2_FIXED: Each subpackage is of Fixed-length. Zero-length
+ *      parent package is allowed.
  *      (Used for _PRT)
  *
- * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length
+ * ACPI_PTYPE2_MIN: Each subpackage has a Variable-length but minimum length.
+ *      Zero-length parent package is allowed:
  *      (Used for _HPX)
  *
  * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
  *      (Used for _ART, _FPS)
  *
  * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
- *      followed by an optional element
+ *      followed by an optional element. Zero-length parent package is allowed.
  *      object type
  *      count
  *      object type
@@ -116,8 +120,47 @@
 	ACPI_PTYPE2_FIX_VAR = 10
 };
 
+/* Support macros for users of the predefined info table */
+
+#define METHOD_PREDEF_ARGS_MAX          4
+#define METHOD_ARG_BIT_WIDTH            3
+#define METHOD_ARG_MASK                 0x0007
+#define ARG_COUNT_IS_MINIMUM            0x8000
+#define METHOD_MAX_ARG_TYPE             ACPI_TYPE_PACKAGE
+
+#define METHOD_GET_COUNT(arg_list)      (arg_list & METHOD_ARG_MASK)
+#define METHOD_GET_NEXT_ARG(arg_list)   (arg_list >> METHOD_ARG_BIT_WIDTH)
+
+/* Macros used to build the predefined info table */
+
+#define METHOD_0ARGS                    0
+#define METHOD_1ARGS(a1)                (1 | (a1 << 3))
+#define METHOD_2ARGS(a1,a2)             (2 | (a1 << 3) | (a2 << 6))
+#define METHOD_3ARGS(a1,a2,a3)          (3 | (a1 << 3) | (a2 << 6) | (a3 << 9))
+#define METHOD_4ARGS(a1,a2,a3,a4)       (4 | (a1 << 3) | (a2 << 6) | (a3 << 9) | (a4 << 12))
+
+#define METHOD_RETURNS(type)            (type)
+#define METHOD_NO_RETURN_VALUE          0
+
+#define PACKAGE_INFO(a,b,c,d,e,f)       {{{(a),(b),(c),(d)}, ((((u16)(f)) << 8) | (e)), 0}}
+
+/* Support macros for the resource descriptor info table */
+
+#define WIDTH_1                         0x0001
+#define WIDTH_2                         0x0002
+#define WIDTH_3                         0x0004
+#define WIDTH_8                         0x0008
+#define WIDTH_16                        0x0010
+#define WIDTH_32                        0x0020
+#define WIDTH_64                        0x0040
+#define VARIABLE_DATA                   0x0080
+#define NUM_RESOURCE_WIDTHS             8
+
+#define WIDTH_ADDRESS                   WIDTH_16 | WIDTH_32 | WIDTH_64
+
 #ifdef ACPI_CREATE_PREDEFINED_TABLE
-/*
+/******************************************************************************
+ *
  * Predefined method/object information table.
  *
  * These are the names that can actually be evaluated via acpi_evaluate_object.
@@ -125,23 +168,24 @@
  *
  *      1) Predefined/Reserved names that are never evaluated via
  *         acpi_evaluate_object:
- *          _Lxx and _Exx GPE methods
- *          _Qxx EC methods
- *          _T_x compiler temporary variables
+ *              _Lxx and _Exx GPE methods
+ *              _Qxx EC methods
+ *              _T_x compiler temporary variables
+ *              _Wxx wake events
  *
  *      2) Predefined names that never actually exist within the AML code:
- *          Predefined resource descriptor field names
+ *              Predefined resource descriptor field names
  *
  *      3) Predefined names that are implemented within ACPICA:
- *          _OSI
- *
- *      4) Some predefined names that are not documented within the ACPI spec.
- *          _WDG, _WED
+ *              _OSI
  *
  * The main entries in the table each contain the following items:
  *
  * name                 - The ACPI reserved name
- * param_count          - Number of arguments to the method
+ * argument_list        - Contains (in 16 bits), the number of required
+ *                        arguments to the method (3 bits), and a 3-bit type
+ *                        field for each argument (up to 4 arguments). The
+ *                        METHOD_?ARGS macros generate the correct packed data.
  * expected_btypes      - Allowed type(s) for the return value.
  *                        0 means that no return value is expected.
  *
@@ -151,256 +195,511 @@
  * overall size of the stored data.
  *
  * Note: The additional braces are intended to promote portability.
- */
-static const union acpi_predefined_info predefined_names[] = {
-	{{"_AC0", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC1", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC2", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC3", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC4", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC5", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC6", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC7", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC8", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AC9", 0, ACPI_RTYPE_INTEGER}},
-	{{"_ADR", 0, ACPI_RTYPE_INTEGER}},
-	{{"_AEI", 0, ACPI_RTYPE_BUFFER}},
-	{{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+ *
+ * Note2: Table is used by the kernel-resident subsystem, the iASL compiler,
+ * and the acpi_help utility.
+ *
+ * TBD: _PRT - currently ignore reversed entries. Attempt to fix in nsrepair.
+ * Possibly fixing package elements like _BIF, etc.
+ *
+ *****************************************************************************/
 
-	{{"_AL1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
+	{{"_AC0", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC1", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC2", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL4", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC3", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL5", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC4", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL6", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC5", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL7", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC6", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL8", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC7", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_AL9", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_AC8", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_ALC", 0, ACPI_RTYPE_INTEGER}},
-	{{"_ALI", 0, ACPI_RTYPE_INTEGER}},
-	{{"_ALP", 0, ACPI_RTYPE_INTEGER}},
-	{{"_ALR", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 (Ints) */
-			  {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2,0}, 0,0}},
+	{{"_AC9", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_ALT", 0, ACPI_RTYPE_INTEGER}},
-	{{"_ART", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (1 Int(rev), n Pkg (2 Ref/11 Int) */
-	{{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER},
-	  11, 0}},
+	{{"_ADR", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_BBN", 0, ACPI_RTYPE_INTEGER}},
-	{{"_BCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+	{{"_AEI", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
 
-	{{"_BCM", 1, 0}},
-	{{"_BCT", 1, ACPI_RTYPE_INTEGER}},
-	{{"_BDN", 0, ACPI_RTYPE_INTEGER}},
-	{{"_BFS", 1, 0}},
-	{{"_BIF", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (9 Int),(4 Str) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4, 0}},
+	{{"_AL0", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_BIX", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (16 Int),(4 Str) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
-	  0}},
+	{{"_AL1", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_BLT", 3, 0}},
-	{{"_BMA", 1, ACPI_RTYPE_INTEGER}},
-	{{"_BMC", 1, 0}},
-	{{"_BMD", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (5 Int) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+	{{"_AL2", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_BMS", 1, ACPI_RTYPE_INTEGER}},
-	{{"_BQC", 0, ACPI_RTYPE_INTEGER}},
-	{{"_BST", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
+	{{"_AL3", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_BTM", 1, ACPI_RTYPE_INTEGER}},
-	{{"_BTP", 1, 0}},
-	{{"_CBA", 0, ACPI_RTYPE_INTEGER}}, /* See PCI firmware spec 3.0 */
-	{{"_CDM", 0, ACPI_RTYPE_INTEGER}},
-	{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
-	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0, 0}, 0,
-	  0}},
+	{{"_AL4", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_CLS", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (3 Int) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+	{{"_AL5", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_CPC", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Ints/Bufs) */
-	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
-	  0}},
+	{{"_AL6", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_CRS", 0, ACPI_RTYPE_BUFFER}},
-	{{"_CRT", 0, ACPI_RTYPE_INTEGER}},
-	{{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
-			  {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+	{{"_AL7", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
-	{{{ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,
-	  0}},
+	{{"_AL8", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_CWS", 1, ACPI_RTYPE_INTEGER}},
-	{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
-	{{"_DCS", 0, ACPI_RTYPE_INTEGER}},
-	{{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
-	{{"_DDN", 0, ACPI_RTYPE_STRING}},
-	{{"_DEP", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
-	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+	{{"_AL9", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
 
-	{{"_DGS", 0, ACPI_RTYPE_INTEGER}},
-	{{"_DIS", 0, 0}},
+	{{"_ALC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_DLM", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
-	{{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
-	   ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
+	{{"_ALI", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_DMA", 0, ACPI_RTYPE_BUFFER}},
-	{{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+	{{"_ALP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_DOS", 1, 0}},
-	{{"_DSM", 4, ACPI_RTYPE_ALL}},     /* Must return a type, but it can be of any type */
-	{{"_DSS", 1, 0}},
-	{{"_DSW", 3, 0}},
-	{{"_DTI", 1, 0}},
-	{{"_EC_", 0, ACPI_RTYPE_INTEGER}},
-	{{"_EDL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs)*/
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_ALR", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each 2 (Ints) */
+	PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),
 
-	{{"_EJ0", 1, 0}},
-	{{"_EJ1", 1, 0}},
-	{{"_EJ2", 1, 0}},
-	{{"_EJ3", 1, 0}},
-	{{"_EJ4", 1, 0}},
-	{{"_EJD", 0, ACPI_RTYPE_STRING}},
-	{{"_EVT", 1, 0}},
-	{{"_FDE", 0, ACPI_RTYPE_BUFFER}},
-	{{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
+	{{"_ALT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_FDM", 1, 0}},
-	{{"_FIF", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (4 Int) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0}, 0, 0}},
+	{{"_ART", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (1 Int(rev), n Pkg (2 Ref/11 Int) */
+	PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_REFERENCE, 2,
+		     ACPI_RTYPE_INTEGER, 11, 0),
 
-	{{"_FIX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
+	{{"_BBN", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_FPS", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (1 Int(rev), n Pkg (5 Int) */
-	{{{ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 5, 0}, 0, 0}},
+	{{"_BCL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Ints) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
 
-	{{"_FSL", 1, 0}},
-	{{"_FST", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (3 Int) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+	{{"_BCM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_GAI", 0, ACPI_RTYPE_INTEGER}},
-	{{"_GCP", 0, ACPI_RTYPE_INTEGER}},
-	{{"_GHL", 0, ACPI_RTYPE_INTEGER}},
-	{{"_GLK", 0, ACPI_RTYPE_INTEGER}},
-	{{"_GPD", 0, ACPI_RTYPE_INTEGER}},
-	{{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
-	{{"_GRT", 0, ACPI_RTYPE_BUFFER}},
-	{{"_GSB", 0, ACPI_RTYPE_INTEGER}},
-	{{"_GTF", 0, ACPI_RTYPE_BUFFER}},
-	{{"_GTM", 0, ACPI_RTYPE_BUFFER}},
-	{{"_GTS", 1, 0}},
-	{{"_GWS", 1, ACPI_RTYPE_INTEGER}},
-	{{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
-	{{"_HOT", 0, ACPI_RTYPE_INTEGER}},
-	{{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
+	{{"_BCT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_BDN", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_BFS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_BIF", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (9 Int),(4 Str) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
+		     ACPI_RTYPE_STRING, 4, 0),
+
+	{{"_BIX", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (16 Int),(4 Str) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,
+		     ACPI_RTYPE_STRING, 4, 0),
+
+	{{"_BLT",
+	  METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_BMA", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_BMC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_BMD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (5 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+	{{"_BMS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_BQC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_BST", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (4 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+	{{"_BTM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_BTP", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_CBA", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* See PCI firmware spec 3.0 */
+
+	{{"_CDM", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_CID", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Ints/Strs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,
+		     0, 0, 0),
+
+	{{"_CLS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (3 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
+
+	{{"_CPC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Ints/Bufs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0,
+		     0, 0, 0),
+
+	{{"_CRS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_CRT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_CSD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (1 Int(n), n-1 Int) */
+	PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+	{{"_CST", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
+	PACKAGE_INFO(ACPI_PTYPE2_PKG_COUNT, ACPI_RTYPE_BUFFER, 1,
+		     ACPI_RTYPE_INTEGER, 3, 0),
+
+	{{"_CWS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_DCK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_DCS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_DDC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER)}},
+
+	{{"_DDN", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+	{{"_DEP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_DGS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_DIS", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_DLM", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+	PACKAGE_INFO(ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+		     ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER, 0, 0),
+
+	{{"_DMA", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_DOD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Ints) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+	{{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_DSM",
+	  METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+		       ACPI_TYPE_PACKAGE),
+	  METHOD_RETURNS(ACPI_RTYPE_ALL)}},	/* Must return a value, but it can be of any type */
+
+	{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_DSW",
+	  METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_DTI", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_EC_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_EDL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_EJ0", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_EJ1", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_EJ2", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_EJ3", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_EJ4", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_EJD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+	{{"_ERR",
+	  METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_STRING, ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* Internal use only, used by ACPICA test suites */
+
+	{{"_EVT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_FDE", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_FDI", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (16 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, 0, 0, 0),
+
+	{{"_FDM", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_FIF", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (4 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+	{{"_FIX", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Ints) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+	{{"_FPS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (1 Int(rev), n Pkg (5 Int) */
+	PACKAGE_INFO(ACPI_PTYPE2_REV_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+	{{"_FSL", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_FST", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (3 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
+
+	{{"_GAI", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_GCP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_GHL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_GLK", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_GPD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_GPE", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* _GPE method, not _GPE scope */
+
+	{{"_GRT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_GSB", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_GTF", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_GTM", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_GTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_GWS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_HID", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING)}},
+
+	{{"_HOT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_HPP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (4 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
 
 	/*
-	 * For _HPX, a single package is returned, containing a Variable-length number
+	 * For _HPX, a single package is returned, containing a variable-length number
 	 * of sub-packages. Each sub-package contains a PCI record setting.
 	 * There are several different type of record settings, of different
 	 * lengths, but all elements of all settings are Integers.
 	 */
-	{{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
-			  {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+	{{"_HPX", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each (var Ints) */
+	PACKAGE_INFO(ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
 
-	{{"_HRV", 0, ACPI_RTYPE_INTEGER}},
-	{{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
-	{{"_INI", 0, 0}},
-	{{"_IRC", 0, 0}},
-	{{"_LCK", 1, 0}},
-	{{"_LID", 0, ACPI_RTYPE_INTEGER}},
-	{{"_MAT", 0, ACPI_RTYPE_BUFFER}},
-	{{"_MBM", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (8 Int) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0}, 0, 0}},
+	{{"_HRV", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_MLS", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Pkgs) each (1 Str/1 Buf) */
-	{{{ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER}, 1, 0}},
+	{{"_IFT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* See IPMI spec */
 
-	{{"_MSG", 1, 0}},
-	{{"_MSM", 4, ACPI_RTYPE_INTEGER}},
-	{{"_NTT", 0, ACPI_RTYPE_INTEGER}},
-	{{"_OFF", 0, 0}},
-	{{"_ON_", 0, 0}},
-	{{"_OS_", 0, ACPI_RTYPE_STRING}},
-	{{"_OSC", 4, ACPI_RTYPE_BUFFER}},
-	{{"_OST", 3, 0}},
-	{{"_PAI", 1, ACPI_RTYPE_INTEGER}},
-	{{"_PCL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_INI", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PCT", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}},
+	{{"_IRC", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PDC", 1, 0}},
-	{{"_PDL", 0, ACPI_RTYPE_INTEGER}},
-	{{"_PIC", 1, 0}},
-	{{"_PIF", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (3 Int),(3 Str) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, ACPI_RTYPE_STRING}, 3, 0}},
+	{{"_LCK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PLD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Bufs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0,0}, 0,0}},
+	{{"_LID", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_PMC", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (11 Int),(3 Str) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 11, ACPI_RTYPE_STRING}, 3,
-	  0}},
+	{{"_MAT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
 
-	{{"_PMD", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
-	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+	{{"_MBM", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (8 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 8, 0, 0, 0),
 
-	{{"_PMM", 0, ACPI_RTYPE_INTEGER}},
-	{{"_PPC", 0, ACPI_RTYPE_INTEGER}},
-	{{"_PPE", 0, ACPI_RTYPE_INTEGER}}, /* See dig64 spec */
-	{{"_PR0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_MLS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each (1 Str/1 Buf) */
+	PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_STRING, 1, ACPI_RTYPE_BUFFER, 1,
+		     0),
 
-	{{"_PR1", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_MSG", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PR2", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_MSM",
+	  METHOD_4ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+		       ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_PR3", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
-	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+	{{"_NTT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_PRE", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
-	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+	{{"_OFF", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PRL", 0, ACPI_RTYPE_PACKAGE}},	/* Variable-length (Refs) */
-	{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+	{{"_ON_", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PRS", 0, ACPI_RTYPE_BUFFER}},
+	{{"_OS_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+	{{"_OSC",
+	  METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
+		       ACPI_TYPE_BUFFER),
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_OST",
+	  METHOD_3ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER, ACPI_TYPE_BUFFER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_PAI", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_PCL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PCT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (2 Buf) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0),
+
+	{{"_PDC", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_PDL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_PIC", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_PIF", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (3 Int),(3 Str) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3,
+		     ACPI_RTYPE_STRING, 3, 0),
+
+	{{"_PLD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Bufs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_BUFFER, 0, 0, 0, 0),
+
+	{{"_PMC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (11 Int),(3 Str) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 11,
+		     ACPI_RTYPE_STRING, 3, 0),
+
+	{{"_PMD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PMM", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_PPC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_PPE", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* See dig64 spec */
+
+	{{"_PR0", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PR1", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PR2", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PR3", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PRE", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PRL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PRS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
 
 	/*
 	 * For _PRT, many BIOSs reverse the 3rd and 4th Package elements (Source
@@ -410,47 +709,89 @@
 	 * warning, add the ACPI_RTYPE_REFERENCE type to the 4th element (index 3)
 	 * in the statement below.
 	 */
-	{{"_PRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (4): Int,Int,Int/Ref,Int */
-			  {{{ACPI_PTYPE2_FIXED, 4, ACPI_RTYPE_INTEGER,ACPI_RTYPE_INTEGER},
-			  ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE,
-			  ACPI_RTYPE_INTEGER}},
+	{{"_PRT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each (4): Int,Int,Int/Ref,Int */
+	PACKAGE_INFO(ACPI_PTYPE2_FIXED, 4, ACPI_RTYPE_INTEGER,
+		     ACPI_RTYPE_INTEGER,
+		     ACPI_RTYPE_INTEGER | ACPI_RTYPE_REFERENCE,
+		     ACPI_RTYPE_INTEGER),
 
-	{{"_PRW", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each: Pkg/Int,Int,[Variable-length Refs] (Pkg is Ref/Int) */
-			  {{{ACPI_PTYPE1_OPTION, 2, ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE,
-			  ACPI_RTYPE_INTEGER}, ACPI_RTYPE_REFERENCE,0}},
+	{{"_PRW", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each: Pkg/Int,Int,[Variable-length Refs] (Pkg is Ref/Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_OPTION, 2,
+		     ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE,
+		     ACPI_RTYPE_INTEGER, ACPI_RTYPE_REFERENCE, 0),
 
-	{{"_PS0", 0, 0}},
-	{{"_PS1", 0, 0}},
-	{{"_PS2", 0, 0}},
-	{{"_PS3", 0, 0}},
-	{{"_PSC", 0, ACPI_RTYPE_INTEGER}},
-	{{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
-			  {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
+	{{"_PS0", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PSE", 1, 0}},
-	{{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_PS1", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PSR", 0, ACPI_RTYPE_INTEGER}},
-	{{"_PSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (6 Int) */
-			  {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6,0}, 0,0}},
+	{{"_PS2", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PSV", 0, ACPI_RTYPE_INTEGER}},
-	{{"_PSW", 1, 0}},
-	{{"_PTC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (2 Buf) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2,0}, 0,0}},
+	{{"_PS3", METHOD_0ARGS,
+	  METHOD_NO_RETURN_VALUE}},
 
-	{{"_PTP", 2, ACPI_RTYPE_INTEGER}},
-	{{"_PTS", 1, 0}},
-	{{"_PUR", 0, ACPI_RTYPE_PACKAGE}},	/* Fixed-length (2 Int) */
-	{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0}, 0, 0}},
+	{{"_PSC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_PXM", 0, ACPI_RTYPE_INTEGER}},
-	{{"_REG", 2, 0}},
-	{{"_REV", 0, ACPI_RTYPE_INTEGER}},
-	{{"_RMV", 0, ACPI_RTYPE_INTEGER}},
-	{{"_ROM", 2, ACPI_RTYPE_BUFFER}},
-	{{"_RTV", 0, ACPI_RTYPE_INTEGER}},
+	{{"_PSD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each (5 Int) with count */
+	PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
+
+	{{"_PSE", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_PSL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_PSR", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_PSS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each (6 Int) */
+	PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 6, 0, 0, 0),
+
+	{{"_PSV", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_PSW", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_PTC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (2 Buf) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_BUFFER, 2, 0, 0, 0),
+
+	{{"_PTP", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_PTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_PUR", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (2 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),
+
+	{{"_PXM", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_REG", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_REV", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_RMV", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_ROM", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_RTV", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
 	/*
 	 * For _S0_ through _S5_, the ACPI spec defines a return Package
@@ -458,111 +799,285 @@
 	 * Allow this by making the objects "Variable-length length", but all elements
 	 * must be Integers.
 	 */
-	{{"_S0_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+	{{"_S0_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (1 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
 
-	{{"_S1_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+	{{"_S1_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (1 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
 
-	{{"_S2_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+	{{"_S2_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (1 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
 
-	{{"_S3_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+	{{"_S3_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (1 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
 
-	{{"_S4_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+	{{"_S4_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (1 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
 
-	{{"_S5_", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (1 Int) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1,0}, 0,0}},
+	{{"_S5_", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (1 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 1, 0, 0, 0),
 
-	{{"_S1D", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S2D", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S3D", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S4D", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S0W", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S1W", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S2W", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S3W", 0, ACPI_RTYPE_INTEGER}},
-	{{"_S4W", 0, ACPI_RTYPE_INTEGER}},
-	{{"_SBS", 0, ACPI_RTYPE_INTEGER}},
-	{{"_SCP", 0x13, 0}},               /* Acpi 1.0 allowed 1 arg. Acpi 3.0 expanded to 3 args. Allow both. */
-			   /* Note: the 3-arg definition may be removed for ACPI 4.0 */
-	{{"_SDD", 1, 0}},
-	{{"_SEG", 0, ACPI_RTYPE_INTEGER}},
-	{{"_SHL", 1, ACPI_RTYPE_INTEGER}},
-	{{"_SLI", 0, ACPI_RTYPE_BUFFER}},
-	{{"_SPD", 1, ACPI_RTYPE_INTEGER}},
-	{{"_SRS", 1, 0}},
-	{{"_SRT", 1, ACPI_RTYPE_INTEGER}},
-	{{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
-	{{"_SST", 1, 0}},
-	{{"_STA", 0, ACPI_RTYPE_INTEGER}},
-	{{"_STM", 3, 0}},
-	{{"_STP", 2, ACPI_RTYPE_INTEGER}},
-	{{"_STR", 0, ACPI_RTYPE_BUFFER}},
-	{{"_STV", 2, ACPI_RTYPE_INTEGER}},
-	{{"_SUB", 0, ACPI_RTYPE_STRING}},
-	{{"_SUN", 0, ACPI_RTYPE_INTEGER}},
-	{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TC2", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TDL", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TIP", 1, ACPI_RTYPE_INTEGER}},
-	{{"_TIV", 1, ACPI_RTYPE_INTEGER}},
-	{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TPC", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TPT", 1, 0}},
-	{{"_TRT", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 2 Ref/6 Int */
-			  {{{ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER}, 6, 0}},
+	{{"_S1D", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_TSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int with count */
-			  {{{ACPI_PTYPE2_COUNT,ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+	{{"_S2D", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_TSP", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TSS", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each 5 Int */
-			  {{{ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+	{{"_S3D", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_TST", 0, ACPI_RTYPE_INTEGER}},
-	{{"_TTS", 1, 0}},
-	{{"_TZD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
-			  {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
+	{{"_S4D", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_TZM", 0, ACPI_RTYPE_REFERENCE}},
-	{{"_TZP", 0, ACPI_RTYPE_INTEGER}},
-	{{"_UID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
-	{{"_UPC", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4,0}, 0,0}},
+	{{"_S0W", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
-	{{"_UPD", 0, ACPI_RTYPE_INTEGER}},
-	{{"_UPP", 0, ACPI_RTYPE_INTEGER}},
-	{{"_VPO", 0, ACPI_RTYPE_INTEGER}},
+	{{"_S1W", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_S2W", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_S3W", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_S4W", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SBS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SCP", METHOD_1ARGS(ACPI_TYPE_INTEGER) | ARG_COUNT_IS_MINIMUM,
+	  METHOD_NO_RETURN_VALUE}},	/* Acpi 1.0 allowed 1 integer arg. Acpi 3.0 expanded to 3 args. Allow both. */
+
+	{{"_SDD", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_SEG", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SHL", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SLI", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_SPD", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SRS", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_SRT", METHOD_1ARGS(ACPI_TYPE_BUFFER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SRV", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},	/* See IPMI spec */
+
+	{{"_SST", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_STA", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_STM",
+	  METHOD_3ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_BUFFER, ACPI_TYPE_BUFFER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_STP", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_STR", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
+
+	{{"_STV", METHOD_2ARGS(ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SUB", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_STRING)}},
+
+	{{"_SUN", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_SWS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TC1", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TC2", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TDL", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TIP", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TIV", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TMP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TPC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TPT", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_TRT", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each 2 Ref/6 Int */
+	PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_REFERENCE, 2, ACPI_RTYPE_INTEGER,
+		     6, 0),
+
+	{{"_TSD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each 5 Int with count */
+	PACKAGE_INFO(ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+	{{"_TSP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TSS", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Pkgs) each 5 Int */
+	PACKAGE_INFO(ACPI_PTYPE2, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+	{{"_TST", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_TTS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_NO_RETURN_VALUE}},
+
+	{{"_TZD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Variable-length (Refs) */
+	PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0, 0, 0),
+
+	{{"_TZM", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_REFERENCE)}},
+
+	{{"_TZP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_UID", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING)}},
+
+	{{"_UPC", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}},	/* Fixed-length (4 Int) */
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+	{{"_UPD", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_UPP", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
+	{{"_VPO", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
 	/* Acpi 1.0 defined _WAK with no return value. Later, it was changed to return a package */
 
-	{{"_WAK", 1,
-          ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER | ACPI_RTYPE_PACKAGE}},
-			  {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2,0}, 0,0}}, /* Fixed-length (2 Int), but is optional */
+	{{"_WAK", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_NONE | ACPI_RTYPE_INTEGER |
+			 ACPI_RTYPE_PACKAGE)}},
+	PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 2, 0, 0, 0),	/* Fixed-length (2 Int), but is optional */
 
 	/* _WDG/_WED are MS extensions defined by "Windows Instrumentation" */
 
-	{{"_WDG", 0, ACPI_RTYPE_BUFFER}},
-	{{"_WED", 1,
-	  ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}},
+	{{"_WDG", METHOD_0ARGS,
+	  METHOD_RETURNS(ACPI_RTYPE_BUFFER)}},
 
-	{{{0, 0, 0, 0}, 0, 0}}  /* Table terminator */
+	{{"_WED", METHOD_1ARGS(ACPI_TYPE_INTEGER),
+	  METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
+			 ACPI_RTYPE_BUFFER)}},
+
+	PACKAGE_INFO(0, 0, 0, 0, 0, 0)	/* Table terminator */
+};
+#else
+extern const union acpi_predefined_info acpi_gbl_predefined_methods[];
+#endif
+
+#if (defined ACPI_CREATE_RESOURCE_TABLE && defined ACPI_APPLICATION)
+/******************************************************************************
+ *
+ * Predefined names for use in Resource Descriptors. These names do not
+ * appear in the global Predefined Name table (since these names never
+ * appear in actual AML byte code, only in the original ASL)
+ *
+ * Note: Used by iASL compiler and acpi_help utility only.
+ *
+ *****************************************************************************/
+
+const union acpi_predefined_info acpi_gbl_resource_names[] = {
+	{{"_ADR", WIDTH_16 | WIDTH_64, 0}},
+	{{"_ALN", WIDTH_8 | WIDTH_16 | WIDTH_32, 0}},
+	{{"_ASI", WIDTH_8, 0}},
+	{{"_ASZ", WIDTH_8, 0}},
+	{{"_ATT", WIDTH_64, 0}},
+	{{"_BAS", WIDTH_16 | WIDTH_32, 0}},
+	{{"_BM_", WIDTH_1, 0}},
+	{{"_DBT", WIDTH_16, 0}},	/* Acpi 5.0 */
+	{{"_DEC", WIDTH_1, 0}},
+	{{"_DMA", WIDTH_8, 0}},
+	{{"_DPL", WIDTH_1, 0}},	/* Acpi 5.0 */
+	{{"_DRS", WIDTH_16, 0}},	/* Acpi 5.0 */
+	{{"_END", WIDTH_1, 0}},	/* Acpi 5.0 */
+	{{"_FLC", WIDTH_2, 0}},	/* Acpi 5.0 */
+	{{"_GRA", WIDTH_ADDRESS, 0}},
+	{{"_HE_", WIDTH_1, 0}},
+	{{"_INT", WIDTH_16 | WIDTH_32, 0}},
+	{{"_IOR", WIDTH_2, 0}},	/* Acpi 5.0 */
+	{{"_LEN", WIDTH_8 | WIDTH_ADDRESS, 0}},
+	{{"_LIN", WIDTH_8, 0}},	/* Acpi 5.0 */
+	{{"_LL_", WIDTH_1, 0}},
+	{{"_MAF", WIDTH_1, 0}},
+	{{"_MAX", WIDTH_ADDRESS, 0}},
+	{{"_MEM", WIDTH_2, 0}},
+	{{"_MIF", WIDTH_1, 0}},
+	{{"_MIN", WIDTH_ADDRESS, 0}},
+	{{"_MOD", WIDTH_1, 0}},	/* Acpi 5.0 */
+	{{"_MTP", WIDTH_2, 0}},
+	{{"_PAR", WIDTH_8, 0}},	/* Acpi 5.0 */
+	{{"_PHA", WIDTH_1, 0}},	/* Acpi 5.0 */
+	{{"_PIN", WIDTH_16, 0}},	/* Acpi 5.0 */
+	{{"_PPI", WIDTH_8, 0}},	/* Acpi 5.0 */
+	{{"_POL", WIDTH_1 | WIDTH_2, 0}},	/* Acpi 5.0 */
+	{{"_RBO", WIDTH_8, 0}},
+	{{"_RBW", WIDTH_8, 0}},
+	{{"_RNG", WIDTH_1, 0}},
+	{{"_RT_", WIDTH_8, 0}},	/* Acpi 3.0 */
+	{{"_RW_", WIDTH_1, 0}},
+	{{"_RXL", WIDTH_16, 0}},	/* Acpi 5.0 */
+	{{"_SHR", WIDTH_2, 0}},
+	{{"_SIZ", WIDTH_2, 0}},
+	{{"_SLV", WIDTH_1, 0}},	/* Acpi 5.0 */
+	{{"_SPE", WIDTH_32, 0}},	/* Acpi 5.0 */
+	{{"_STB", WIDTH_2, 0}},	/* Acpi 5.0 */
+	{{"_TRA", WIDTH_ADDRESS, 0}},
+	{{"_TRS", WIDTH_1, 0}},
+	{{"_TSF", WIDTH_8, 0}},	/* Acpi 3.0 */
+	{{"_TTP", WIDTH_1, 0}},
+	{{"_TXL", WIDTH_16, 0}},	/* Acpi 5.0 */
+	{{"_TYP", WIDTH_2 | WIDTH_16, 0}},
+	{{"_VEN", VARIABLE_DATA, 0}},	/* Acpi 5.0 */
+	PACKAGE_INFO(0, 0, 0, 0, 0, 0)	/* Table terminator */
 };
 
-#if 0
-
-	/* This is an internally implemented control method, no need to check */
-{ {
-"_OSI", 1, ACPI_RTYPE_INTEGER}},
-
-	/* TBD: */
-	_PRT - currently ignore reversed entries. attempt to fix here?
-	think about possibly fixing package elements like _BIF, etc.
+static const union acpi_predefined_info acpi_gbl_scope_names[] = {
+	{{"_GPE", 0, 0}},
+	{{"_PR_", 0, 0}},
+	{{"_SB_", 0, 0}},
+	{{"_SI_", 0, 0}},
+	{{"_TZ_", 0, 0}},
+	PACKAGE_INFO(0, 0, 0, 0, 0, 0)	/* Table terminator */
+};
+#else
+extern const union acpi_predefined_info acpi_gbl_resource_names[];
 #endif
 
 #endif
-#endif
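
The rewritten acpredef.h table above pairs every Package-returning name with a following PACKAGE_INFO row, which is why lookups over the table must skip an extra entry after such names (the same rule the removed acpi_ns_check_for_predefined_name loop further down uses). A minimal standalone sketch of that walk, assuming an illustrative struct layout and bit value rather than the real ACPICA definitions:

#include <stdio.h>
#include <string.h>

#define RTYPE_PACKAGE 0x08	/* illustrative bit value, not the ACPICA one */

struct predef_entry {
	char name[5];		/* 4-char ACPI name + NUL */
	unsigned int return_btypes;
};

/* Names that return a Package are followed by one package-info row */
static const struct predef_entry table[] = {
	{ "_PSC", 0x01 },		/* returns Integer */
	{ "_PSD", RTYPE_PACKAGE },	/* returns Package ... */
	{ "INFO", 0 },			/* ... its package-info row, skipped below */
	{ "_PSE", 0 },			/* no return value */
	{ "", 0 },			/* table terminator */
};

static const struct predef_entry *match_predefined(const char *name)
{
	const struct predef_entry *entry = table;

	while (entry->name[0]) {
		if (!strncmp(name, entry->name, 4))
			return entry;

		if (entry->return_btypes & RTYPE_PACKAGE)
			entry++;	/* jump over the package-info row */
		entry++;
	}
	return NULL;
}

int main(void)
{
	const struct predef_entry *e = match_predefined("_PSE");

	printf("%s\n", e ? e->name : "not found");
	return 0;
}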
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 0082fa0..202f4f1 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -113,9 +113,10 @@
 	u32 num_packages;
 };
 
+/* Object reference counts */
+
 #define REF_INCREMENT       (u16) 0
 #define REF_DECREMENT       (u16) 1
-#define REF_FORCE_DELETE    (u16) 2
 
 /* acpi_ut_dump_buffer */
 
@@ -421,7 +422,7 @@
  */
 acpi_status acpi_ut_initialize_interfaces(void);
 
-void acpi_ut_interface_terminate(void);
+acpi_status acpi_ut_interface_terminate(void);
 
 acpi_status acpi_ut_install_interface(acpi_string interface_name);
 
@@ -432,6 +433,26 @@
 acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
 
 /*
+ * utpredef - support for predefined names
+ */
+const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
+								     acpi_predefined_info
+								     *this_name);
+
+const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name);
+
+const union acpi_predefined_info *acpi_ut_match_resource_name(char *name);
+
+void
+acpi_ut_display_predefined_method(char *buffer,
+				  const union acpi_predefined_info *this_name,
+				  u8 multi_line);
+
+void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes);
+
+u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types);
+
+/*
  * utstate - Generic state creation/cache routines
  */
 void
@@ -483,7 +504,8 @@
 /*
  * utmisc
  */
-const char *acpi_ut_validate_exception(acpi_status status);
+const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status
+							     status);
 
 u8 acpi_ut_is_pci_root_bridge(char *id);
 
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 4d8c992..9977899 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -178,7 +178,7 @@
 
 	if (!op) {
 		ACPI_ERROR((AE_INFO, "Null Op"));
-		return_VALUE(TRUE);
+		return_UINT8(TRUE);
 	}
 
 	/*
@@ -210,7 +210,7 @@
 				  "At Method level, result of [%s] not used\n",
 				  acpi_ps_get_opcode_name(op->common.
 							  aml_opcode)));
-		return_VALUE(FALSE);
+		return_UINT8(FALSE);
 	}
 
 	/* Get info on the parent. The root_op is AML_SCOPE */
@@ -219,7 +219,7 @@
 	    acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode);
 	if (parent_info->class == AML_CLASS_UNKNOWN) {
 		ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op));
-		return_VALUE(FALSE);
+		return_UINT8(FALSE);
 	}
 
 	/*
@@ -307,7 +307,7 @@
 			  acpi_ps_get_opcode_name(op->common.parent->common.
 						  aml_opcode), op));
 
-	return_VALUE(TRUE);
+	return_UINT8(TRUE);
 
       result_not_used:
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
@@ -316,7 +316,7 @@
 			  acpi_ps_get_opcode_name(op->common.parent->common.
 						  aml_opcode), op));
 
-	return_VALUE(FALSE);
+	return_UINT8(FALSE);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 44f8325..e2199a9 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -693,7 +693,7 @@
 		default:
 
 			ACPI_ERROR((AE_INFO,
-				    "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
+				    "Unimplemented opcode, class=0x%X type=0x%X Opcode=0x%X Op=%p",
 				    op_class, op_type, op->common.aml_opcode,
 				    op));
 
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index b8ea0b2..83cd45f 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -257,6 +257,8 @@
  *
  * DESCRIPTION: Clears the status bit for the requested event, calls the
  *              handler that previously registered for the event.
+ *              NOTE: If there is no handler for the event, the event is
+ *              disabled to prevent further interrupts.
  *
  ******************************************************************************/
 
@@ -271,17 +273,17 @@
 				      status_register_id, ACPI_CLEAR_STATUS);
 
 	/*
-	 * Make sure we've got a handler. If not, report an error. The event is
-	 * disabled to prevent further interrupts.
+	 * Make sure that a handler exists. If not, report an error
+	 * and disable the event to prevent further interrupts.
 	 */
-	if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
+	if (!acpi_gbl_fixed_event_handlers[event].handler) {
 		(void)acpi_write_bit_register(acpi_gbl_fixed_event_info[event].
 					      enable_register_id,
 					      ACPI_DISABLE_EVENT);
 
 		ACPI_ERROR((AE_INFO,
-			    "No installed handler for fixed event [0x%08X]",
-			    event));
+			    "No installed handler for fixed event - %s (%u), disabling",
+			    acpi_ut_get_event_name(event), event));
 
 		return (ACPI_INTERRUPT_NOT_HANDLED);
 	}
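
The evevent.c change above also documents the dispatch policy in the function header: clear the status bit, and if no handler was ever installed, disable the event so it cannot keep firing. A small standalone sketch of that policy; the handler table and the enable flag here are stand-ins, not ACPICA data structures:

#include <stdio.h>

#define NUM_FIXED_EVENTS 4

typedef unsigned int (*fixed_handler)(void *context);

static struct {
	fixed_handler handler;
	void *context;
} handlers[NUM_FIXED_EVENTS];

static int event_enabled[NUM_FIXED_EVENTS] = { 1, 1, 1, 1 };

static unsigned int dispatch_fixed_event(unsigned int event)
{
	/* The status bit for the event would be cleared here first */

	if (!handlers[event].handler) {
		/* No handler: disable the event to prevent further interrupts */
		event_enabled[event] = 0;
		fprintf(stderr,
			"No installed handler for fixed event %u, disabling\n",
			event);
		return 0;	/* not handled */
	}

	return handlers[event].handler(handlers[event].context);
}

int main(void)
{
	printf("handled=%u enabled=%d\n",
	       dispatch_fixed_event(2), event_enabled[2]);
	return 0;
}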
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b9adb9a..a493b52 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -707,7 +707,7 @@
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
 					"Unable to clear GPE%02X", gpe_number));
-			return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
+			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
 		}
 	}
 
@@ -724,7 +724,7 @@
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status,
 				"Unable to disable GPE%02X", gpe_number));
-		return_VALUE(ACPI_INTERRUPT_NOT_HANDLED);
+		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
 	}
 
 	/*
@@ -784,7 +784,7 @@
 		break;
 	}
 
-	return_VALUE(ACPI_INTERRUPT_HANDLED);
+	return_UINT32(ACPI_INTERRUPT_HANDLED);
 }
 
 #endif				/* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index f4b43be..b905acf 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -89,7 +89,7 @@
 	 */
 	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
 
-	return_VALUE(interrupt_handled);
+	return_UINT32(interrupt_handled);
 }
 
 /*******************************************************************************
@@ -120,7 +120,7 @@
 
 	interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
 
-	return_VALUE(interrupt_handled);
+	return_UINT32(interrupt_handled);
 }
 
 /******************************************************************************
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index ddffd68..ca5fba9 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -467,9 +467,9 @@
 		return_ACPI_STATUS(status);
 	}
 
-	/* Don't allow two handlers. */
+	/* Do not allow multiple handlers */
 
-	if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
+	if (acpi_gbl_fixed_event_handlers[event].handler) {
 		status = AE_ALREADY_EXISTS;
 		goto cleanup;
 	}
@@ -483,8 +483,9 @@
 	if (ACPI_SUCCESS(status))
 		status = acpi_enable_event(event, 0);
 	if (ACPI_FAILURE(status)) {
-		ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
-			      event));
+		ACPI_WARNING((AE_INFO,
+			      "Could not enable fixed event - %s (%u)",
+			      acpi_ut_get_event_name(event), event));
 
 		/* Remove the handler */
 
@@ -492,7 +493,8 @@
 		acpi_gbl_fixed_event_handlers[event].context = NULL;
 	} else {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-				  "Enabled fixed event %X, Handler=%p\n", event,
+				  "Enabled fixed event %s (%X), Handler=%p\n",
+				  acpi_ut_get_event_name(event), event,
 				  handler));
 	}
 
@@ -544,11 +546,12 @@
 
 	if (ACPI_FAILURE(status)) {
 		ACPI_WARNING((AE_INFO,
-			      "Could not write to fixed event enable register 0x%X",
-			      event));
+			      "Could not disable fixed event - %s (%u)",
+			      acpi_ut_get_event_name(event), event));
 	} else {
-		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
-				  event));
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Disabled fixed event - %s (%X)\n",
+				  acpi_ut_get_event_name(event), event));
 	}
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
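
The evxface.c hunk keeps the handler install path symmetric: refuse a second handler, then enable the event, and if enabling fails remove the handler that was just stored. A sketch of that install-with-rollback shape; all names below are placeholders, not the ACPICA API:

#include <stdio.h>
#include <errno.h>

static void (*installed_handler)(void *);

static int enable_event(void)
{
	return 0;	/* pretend the hardware enable succeeded */
}

static int install_fixed_event_handler(void (*handler)(void *))
{
	/* Do not allow multiple handlers */
	if (installed_handler)
		return -EEXIST;

	installed_handler = handler;

	if (enable_event() != 0) {
		/* Roll back: remove the handler we just installed */
		installed_handler = NULL;
		return -EIO;
	}
	return 0;
}

static void my_handler(void *context)
{
	(void)context;
}

int main(void)
{
	printf("first=%d second=%d\n",
	       install_fixed_event_handler(my_handler),
	       install_fixed_event_handler(my_handler));
	return 0;
}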
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index d6e4e42..7039606 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -74,6 +74,12 @@
 		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
 	}
 
+	/* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/* Check current mode */
 
 	if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
@@ -126,6 +132,12 @@
 
 	ACPI_FUNCTION_TRACE(acpi_disable);
 
+	/* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
 				  "System is already in legacy (non-ACPI) mode\n"));
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index e491e46..b0838a4 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -257,7 +257,7 @@
 	union acpi_operand_object *return_desc = NULL;
 	u64 index;
 	acpi_status status = AE_OK;
-	acpi_size length;
+	acpi_size length = 0;
 
 	ACPI_FUNCTION_TRACE_STR(ex_opcode_2A_1T_1R,
 				acpi_ps_get_opcode_name(walk_state->opcode));
@@ -320,7 +320,6 @@
 		 * NOTE: A length of zero is ok, and will create a zero-length, null
 		 *       terminated string.
 		 */
-		length = 0;
 		while ((length < operand[0]->buffer.length) &&
 		       (length < operand[1]->integer.value) &&
 		       (operand[0]->buffer.pointer[length])) {
@@ -376,6 +375,7 @@
 		case ACPI_TYPE_STRING:
 
 			if (index >= operand[0]->string.length) {
+				length = operand[0]->string.length;
 				status = AE_AML_STRING_LIMIT;
 			}
 
@@ -386,6 +386,7 @@
 		case ACPI_TYPE_BUFFER:
 
 			if (index >= operand[0]->buffer.length) {
+				length = operand[0]->buffer.length;
 				status = AE_AML_BUFFER_LIMIT;
 			}
 
@@ -396,6 +397,7 @@
 		case ACPI_TYPE_PACKAGE:
 
 			if (index >= operand[0]->package.count) {
+				length = operand[0]->package.count;
 				status = AE_AML_PACKAGE_LIMIT;
 			}
 
@@ -414,8 +416,9 @@
 
 		if (ACPI_FAILURE(status)) {
 			ACPI_EXCEPTION((AE_INFO, status,
-					"Index (0x%8.8X%8.8X) is beyond end of object",
-					ACPI_FORMAT_UINT64(index)));
+					"Index (0x%X%8.8X) is beyond end of object (length 0x%X)",
+					ACPI_FORMAT_UINT64(index),
+					(u32)length));
 			goto cleanup;
 		}
 
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index d6eab81..6b728ae 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -276,7 +276,7 @@
 		/* Invalid field access type */
 
 		ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
-		return_VALUE(0);
+		return_UINT32(0);
 	}
 
 	if (obj_desc->common.type == ACPI_TYPE_BUFFER_FIELD) {
@@ -289,7 +289,7 @@
 	}
 
 	*return_byte_alignment = byte_alignment;
-	return_VALUE(bit_length);
+	return_UINT32(bit_length);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index b205cbb..99dc7b2 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -340,7 +340,7 @@
 	/* u64 is unsigned, so we don't worry about a '-' prefix */
 
 	if (value == 0) {
-		return_VALUE(1);
+		return_UINT32(1);
 	}
 
 	current_value = value;
@@ -354,7 +354,7 @@
 		num_digits++;
 	}
 
-	return_VALUE(num_digits);
+	return_UINT32(num_digits);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index deb3f61..579c3a5 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -66,6 +66,12 @@
 
 	ACPI_FUNCTION_TRACE(hw_set_mode);
 
+	/* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_ACPI_STATUS(AE_OK);
+	}
+
 	/*
 	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
 	 * system does not support mode transition.
@@ -146,23 +152,29 @@
 
 	ACPI_FUNCTION_TRACE(hw_get_mode);
 
+	/* If the Hardware Reduced flag is set, machine is always in acpi mode */
+
+	if (acpi_gbl_reduced_hardware) {
+		return_UINT32(ACPI_SYS_MODE_ACPI);
+	}
+
 	/*
 	 * ACPI 2.0 clarified that if SMI_CMD in FADT is zero,
 	 * system does not support mode transition.
 	 */
 	if (!acpi_gbl_FADT.smi_command) {
-		return_VALUE(ACPI_SYS_MODE_ACPI);
+		return_UINT32(ACPI_SYS_MODE_ACPI);
 	}
 
 	status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value);
 	if (ACPI_FAILURE(status)) {
-		return_VALUE(ACPI_SYS_MODE_LEGACY);
+		return_UINT32(ACPI_SYS_MODE_LEGACY);
 	}
 
 	if (value) {
-		return_VALUE(ACPI_SYS_MODE_ACPI);
+		return_UINT32(ACPI_SYS_MODE_ACPI);
 	} else {
-		return_VALUE(ACPI_SYS_MODE_LEGACY);
+		return_UINT32(ACPI_SYS_MODE_LEGACY);
 	}
 }
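
acpi_enable()/acpi_disable() and the hwacpi.c routines all gain the same guard: on hardware-reduced platforms there is no SMI_CMD/SCI_EN handshake, so mode queries and transitions short-circuit to "already in ACPI mode". The shape of that guard, reduced to a standalone sketch with placeholder names and constants:

#include <stdio.h>

#define SYS_MODE_ACPI   1
#define SYS_MODE_LEGACY 2

static int reduced_hardware = 1;	/* in ACPICA this comes from the FADT flags */

static unsigned int hw_get_mode(void)
{
	/* Hardware-reduced machines are always in ACPI mode */
	if (reduced_hardware)
		return SYS_MODE_ACPI;

	/* Otherwise the SCI_EN bit would be read here */
	return SYS_MODE_LEGACY;
}

int main(void)
{
	printf("mode=%u\n", hw_get_mode());
	return 0;
}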
 
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
new file mode 100644
index 0000000..8f79a9d
--- /dev/null
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -0,0 +1,443 @@
+/******************************************************************************
+ *
+ * Module Name: nsconvert - Object conversions for objects returned by
+ *                          predefined methods
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "acinterp.h"
+#include "acpredef.h"
+#include "amlresrc.h"
+
+#define _COMPONENT          ACPI_NAMESPACE
+ACPI_MODULE_NAME("nsconvert")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_convert_to_integer
+ *
+ * PARAMETERS:  original_object     - Object to be converted
+ *              return_object       - Where the new converted object is returned
+ *
+ * RETURN:      Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
+			   union acpi_operand_object **return_object)
+{
+	union acpi_operand_object *new_object;
+	acpi_status status;
+	u64 value = 0;
+	u32 i;
+
+	switch (original_object->common.type) {
+	case ACPI_TYPE_STRING:
+
+		/* String-to-Integer conversion */
+
+		status = acpi_ut_strtoul64(original_object->string.pointer,
+					   ACPI_ANY_BASE, &value);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+
+		/* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
+
+		if (original_object->buffer.length > 8) {
+			return (AE_AML_OPERAND_TYPE);
+		}
+
+		/* Extract each buffer byte to create the integer */
+
+		for (i = 0; i < original_object->buffer.length; i++) {
+			value |=
+			    ((u64)original_object->buffer.
+			     pointer[i] << (i * 8));
+		}
+		break;
+
+	default:
+		return (AE_AML_OPERAND_TYPE);
+	}
+
+	new_object = acpi_ut_create_integer_object(value);
+	if (!new_object) {
+		return (AE_NO_MEMORY);
+	}
+
+	*return_object = new_object;
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_convert_to_string
+ *
+ * PARAMETERS:  original_object     - Object to be converted
+ *              return_object       - Where the new converted object is returned
+ *
+ * RETURN:      Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert an Integer/Buffer object to a String.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_string(union acpi_operand_object *original_object,
+			  union acpi_operand_object **return_object)
+{
+	union acpi_operand_object *new_object;
+	acpi_size length;
+	acpi_status status;
+
+	switch (original_object->common.type) {
+	case ACPI_TYPE_INTEGER:
+		/*
+		 * Integer-to-String conversion. Commonly, convert
+		 * an integer of value 0 to a NULL string. The last element of
+		 * _BIF and _BIX packages occasionally need this fix.
+		 */
+		if (original_object->integer.value == 0) {
+
+			/* Allocate a new NULL string object */
+
+			new_object = acpi_ut_create_string_object(0);
+			if (!new_object) {
+				return (AE_NO_MEMORY);
+			}
+		} else {
+			status =
+			    acpi_ex_convert_to_string(original_object,
+						      &new_object,
+						      ACPI_IMPLICIT_CONVERT_HEX);
+			if (ACPI_FAILURE(status)) {
+				return (status);
+			}
+		}
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		/*
+		 * Buffer-to-String conversion. Use a to_string
+		 * conversion, no transform performed on the buffer data. The best
+		 * example of this is the _BIF method, where the string data from
+		 * the battery is often (incorrectly) returned as buffer object(s).
+		 */
+		length = 0;
+		while ((length < original_object->buffer.length) &&
+		       (original_object->buffer.pointer[length])) {
+			length++;
+		}
+
+		/* Allocate a new string object */
+
+		new_object = acpi_ut_create_string_object(length);
+		if (!new_object) {
+			return (AE_NO_MEMORY);
+		}
+
+		/*
+		 * Copy the raw buffer data with no transform. String is already NULL
+		 * terminated at Length+1.
+		 */
+		ACPI_MEMCPY(new_object->string.pointer,
+			    original_object->buffer.pointer, length);
+		break;
+
+	default:
+		return (AE_AML_OPERAND_TYPE);
+	}
+
+	*return_object = new_object;
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_convert_to_buffer
+ *
+ * PARAMETERS:  original_object     - Object to be converted
+ *              return_object       - Where the new converted object is returned
+ *
+ * RETURN:      Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert an Integer/String/Package object to a Buffer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
+			  union acpi_operand_object **return_object)
+{
+	union acpi_operand_object *new_object;
+	acpi_status status;
+	union acpi_operand_object **elements;
+	u32 *dword_buffer;
+	u32 count;
+	u32 i;
+
+	switch (original_object->common.type) {
+	case ACPI_TYPE_INTEGER:
+		/*
+		 * Integer-to-Buffer conversion.
+		 * Convert the Integer to a packed-byte buffer. _MAT and other
+		 * objects need this sometimes, if a read has been performed on a
+		 * Field object that is less than or equal to the global integer
+		 * size (32 or 64 bits).
+		 */
+		status =
+		    acpi_ex_convert_to_buffer(original_object, &new_object);
+		if (ACPI_FAILURE(status)) {
+			return (status);
+		}
+		break;
+
+	case ACPI_TYPE_STRING:
+
+		/* String-to-Buffer conversion. Simple data copy */
+
+		new_object =
+		    acpi_ut_create_buffer_object(original_object->string.
+						 length);
+		if (!new_object) {
+			return (AE_NO_MEMORY);
+		}
+
+		ACPI_MEMCPY(new_object->buffer.pointer,
+			    original_object->string.pointer,
+			    original_object->string.length);
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+		/*
+		 * This case is often seen for predefined names that must return a
+		 * Buffer object with multiple DWORD integers within. For example,
+		 * _FDE and _GTM. The Package can be converted to a Buffer.
+		 */
+
+		/* All elements of the Package must be integers */
+
+		elements = original_object->package.elements;
+		count = original_object->package.count;
+
+		for (i = 0; i < count; i++) {
+			if ((!*elements) ||
+			    ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
+				return (AE_AML_OPERAND_TYPE);
+			}
+			elements++;
+		}
+
+		/* Create the new buffer object to replace the Package */
+
+		new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
+		if (!new_object) {
+			return (AE_NO_MEMORY);
+		}
+
+		/* Copy the package elements (integers) to the buffer as DWORDs */
+
+		elements = original_object->package.elements;
+		dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
+
+		for (i = 0; i < count; i++) {
+			*dword_buffer = (u32)(*elements)->integer.value;
+			dword_buffer++;
+			elements++;
+		}
+		break;
+
+	default:
+		return (AE_AML_OPERAND_TYPE);
+	}
+
+	*return_object = new_object;
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_convert_to_unicode
+ *
+ * PARAMETERS:  original_object     - ASCII String Object to be converted
+ *              return_object       - Where the new converted object is returned
+ *
+ * RETURN:      Status. AE_OK if conversion was successful.
+ *
+ * DESCRIPTION: Attempt to convert a String object to a Unicode string Buffer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_unicode(union acpi_operand_object *original_object,
+			   union acpi_operand_object **return_object)
+{
+	union acpi_operand_object *new_object;
+	char *ascii_string;
+	u16 *unicode_buffer;
+	u32 unicode_length;
+	u32 i;
+
+	if (!original_object) {
+		return (AE_OK);
+	}
+
+	/* If a Buffer was returned, it must be at least two bytes long */
+
+	if (original_object->common.type == ACPI_TYPE_BUFFER) {
+		if (original_object->buffer.length < 2) {
+			return (AE_AML_OPERAND_VALUE);
+		}
+
+		*return_object = NULL;
+		return (AE_OK);
+	}
+
+	/*
+	 * The original object is an ASCII string. Convert this string to
+	 * a unicode buffer.
+	 */
+	ascii_string = original_object->string.pointer;
+	unicode_length = (original_object->string.length * 2) + 2;
+
+	/* Create a new buffer object for the Unicode data */
+
+	new_object = acpi_ut_create_buffer_object(unicode_length);
+	if (!new_object) {
+		return (AE_NO_MEMORY);
+	}
+
+	unicode_buffer = ACPI_CAST_PTR(u16, new_object->buffer.pointer);
+
+	/* Convert ASCII to Unicode */
+
+	for (i = 0; i < original_object->string.length; i++) {
+		unicode_buffer[i] = (u16)ascii_string[i];
+	}
+
+	*return_object = new_object;
+	return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_convert_to_resource
+ *
+ * PARAMETERS:  original_object     - Object to be converted
+ *              return_object       - Where the new converted object is returned
+ *
+ * RETURN:      Status. AE_OK if conversion was successful
+ *
+ * DESCRIPTION: Attempt to convert an Integer object to a resource_template
+ *              Buffer.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ns_convert_to_resource(union acpi_operand_object *original_object,
+			    union acpi_operand_object **return_object)
+{
+	union acpi_operand_object *new_object;
+	u8 *buffer;
+
+	/*
+	 * We can fix the following cases for an expected resource template:
+	 * 1. No return value (interpreter slack mode is disabled)
+	 * 2. A "Return (Zero)" statement
+	 * 3. A "Return empty buffer" statement
+	 *
+	 * We will return a buffer containing a single end_tag
+	 * resource descriptor.
+	 */
+	if (original_object) {
+		switch (original_object->common.type) {
+		case ACPI_TYPE_INTEGER:
+
+			/* We can only repair an Integer==0 */
+
+			if (original_object->integer.value) {
+				return (AE_AML_OPERAND_TYPE);
+			}
+			break;
+
+		case ACPI_TYPE_BUFFER:
+
+			if (original_object->buffer.length) {
+
+				/* Additional checks can be added in the future */
+
+				*return_object = NULL;
+				return (AE_OK);
+			}
+			break;
+
+		case ACPI_TYPE_STRING:
+		default:
+
+			return (AE_AML_OPERAND_TYPE);
+		}
+	}
+
+	/* Create the new buffer object for the resource descriptor */
+
+	new_object = acpi_ut_create_buffer_object(2);
+	if (!new_object) {
+		return (AE_NO_MEMORY);
+	}
+
+	buffer = ACPI_CAST_PTR(u8, new_object->buffer.pointer);
+
+	/* Initialize the Buffer with a single end_tag descriptor */
+
+	buffer[0] = (ACPI_RESOURCE_NAME_END_TAG | ASL_RDESC_END_TAG_SIZE);
+	buffer[1] = 0x00;
+
+	*return_object = new_object;
+	return (AE_OK);
+}
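
The new nsconvert.c centralizes the object conversions that the repair code later reaches through a table. Two of them are easy to show in isolation: Buffer-to-Integer packs up to eight bytes little-endian into a 64-bit value, and String-to-Unicode widens each ASCII byte to a 16-bit code unit plus a trailing NUL unit. A sketch of just those two transforms in plain C, without the ACPICA operand-object plumbing:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Buffer-to-Integer: little-endian packing, max 8 bytes as in nsconvert.c */
static int buffer_to_integer(const uint8_t *buf, size_t len, uint64_t *out)
{
	uint64_t value = 0;
	size_t i;

	if (len > 8)
		return -1;

	for (i = 0; i < len; i++)
		value |= (uint64_t)buf[i] << (i * 8);

	*out = value;
	return 0;
}

/* ASCII-to-Unicode: each byte becomes a 16-bit code unit, plus a NUL unit */
static uint16_t *ascii_to_unicode(const char *ascii, size_t *out_units)
{
	size_t len = strlen(ascii);
	uint16_t *unicode = calloc(len + 1, sizeof(*unicode));
	size_t i;

	if (!unicode)
		return NULL;

	for (i = 0; i < len; i++)
		unicode[i] = (uint16_t)ascii[i];

	*out_units = len + 1;	/* includes the terminating NUL unit */
	return unicode;
}

int main(void)
{
	uint8_t buf[] = { 0x34, 0x12 };
	uint64_t value = 0;
	size_t units = 0;
	uint16_t *u = ascii_to_unicode("PCI0", &units);

	buffer_to_integer(buf, sizeof(buf), &value);
	printf("integer=0x%llx unicode units=%zu\n",
	       (unsigned long long)value, units);
	free(u);
	return 0;
}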
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 1538f3eb..b61db69 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -98,17 +98,21 @@
 	info->return_object = NULL;
 	info->param_count = 0;
 
-	/*
-	 * Get the actual namespace node for the target object. Handles these cases:
-	 *
-	 * 1) Null node, Pathname (absolute path)
-	 * 2) Node, Pathname (path relative to Node)
-	 * 3) Node, Null Pathname
-	 */
-	status = acpi_ns_get_node(info->prefix_node, info->pathname,
-				  ACPI_NS_NO_UPSEARCH, &info->resolved_node);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+	if (!info->resolved_node) {
+		/*
+		 * Get the actual namespace node for the target object if we need to.
+		 * Handles these cases:
+		 *
+		 * 1) Null node, Pathname (absolute path)
+		 * 2) Node, Pathname (path relative to Node)
+		 * 3) Node, Null Pathname
+		 */
+		status = acpi_ns_get_node(info->prefix_node, info->pathname,
+					  ACPI_NS_NO_UPSEARCH,
+					  &info->resolved_node);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
 	}
 
 	/*
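
The nseval.c change lets callers hand in an already-resolved namespace node and only falls back to the pathname lookup when resolved_node is NULL. The pattern, reduced to a skeleton with stand-in types and a fake lookup:

#include <stdio.h>

struct node {
	const char *name;
};

struct eval_info {
	const char *pathname;
	struct node *resolved_node;
};

static struct node sb_node = { "\\_SB" };

static int lookup_node(const char *pathname, struct node **out)
{
	(void)pathname;
	*out = &sb_node;	/* pretend the pathname resolved */
	return 0;
}

static int evaluate(struct eval_info *info)
{
	/* Resolve the pathname only if the caller did not supply a node */
	if (!info->resolved_node) {
		int status = lookup_node(info->pathname, &info->resolved_node);

		if (status)
			return status;
	}

	printf("evaluating %s\n", info->resolved_node->name);
	return 0;
}

int main(void)
{
	struct eval_info info = { "\\_SB", NULL };

	return evaluate(&info);
}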
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 224c300..8a52916 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -76,19 +76,7 @@
 acpi_ns_check_reference(struct acpi_predefined_data *data,
 			union acpi_operand_object *return_object);
 
-static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes);
-
-/*
- * Names for the types that can be returned by the predefined objects.
- * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
- */
-static const char *acpi_rtype_names[] = {
-	"/Integer",
-	"/String",
-	"/Buffer",
-	"/Package",
-	"/Reference",
-};
+static u32 acpi_ns_get_bitmapped_type(union acpi_operand_object *return_object);
 
 /*******************************************************************************
  *
@@ -112,7 +100,6 @@
 			       acpi_status return_status,
 			       union acpi_operand_object **return_object_ptr)
 {
-	union acpi_operand_object *return_object = *return_object_ptr;
 	acpi_status status = AE_OK;
 	const union acpi_predefined_info *predefined;
 	char *pathname;
@@ -120,7 +107,7 @@
 
 	/* Match the name for this method/object against the predefined list */
 
-	predefined = acpi_ns_check_for_predefined_name(node);
+	predefined = acpi_ut_match_predefined_method(node->name.ascii);
 
 	/* Get the full pathname to the object, for use in warning messages */
 
@@ -152,25 +139,6 @@
 	}
 
 	/*
-	 * If there is no return value, check if we require a return value for
-	 * this predefined name. Either one return value is expected, or none,
-	 * for both methods and other objects.
-	 *
-	 * Exit now if there is no return object. Warning if one was expected.
-	 */
-	if (!return_object) {
-		if ((predefined->info.expected_btypes) &&
-		    (!(predefined->info.expected_btypes & ACPI_RTYPE_NONE))) {
-			ACPI_WARN_PREDEFINED((AE_INFO, pathname,
-					      ACPI_WARN_ALWAYS,
-					      "Missing expected return value"));
-
-			status = AE_AML_NO_RETURN_VALUE;
-		}
-		goto cleanup;
-	}
-
-	/*
 	 * Return value validation and possible repair.
 	 *
 	 * 1) Don't perform return value validation/repair if this feature
@@ -310,8 +278,10 @@
 	 * Validate the user-supplied parameter count.
 	 * Allow two different legal argument counts (_SCP, etc.)
 	 */
-	required_params_current = predefined->info.param_count & 0x0F;
-	required_params_old = predefined->info.param_count >> 4;
+	required_params_current =
+	    predefined->info.argument_list & METHOD_ARG_MASK;
+	required_params_old =
+	    predefined->info.argument_list >> METHOD_ARG_BIT_WIDTH;
 
 	if (user_param_count != ACPI_UINT32_MAX) {
 		if ((user_param_count != required_params_current) &&
@@ -340,52 +310,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ns_check_for_predefined_name
- *
- * PARAMETERS:  node            - Namespace node for the method/object
- *
- * RETURN:      Pointer to entry in predefined table. NULL indicates not found.
- *
- * DESCRIPTION: Check an object name against the predefined object list.
- *
- ******************************************************************************/
-
-const union acpi_predefined_info *acpi_ns_check_for_predefined_name(struct
-								    acpi_namespace_node
-								    *node)
-{
-	const union acpi_predefined_info *this_name;
-
-	/* Quick check for a predefined name, first character must be underscore */
-
-	if (node->name.ascii[0] != '_') {
-		return (NULL);
-	}
-
-	/* Search info table for a predefined method/object name */
-
-	this_name = predefined_names;
-	while (this_name->info.name[0]) {
-		if (ACPI_COMPARE_NAME(node->name.ascii, this_name->info.name)) {
-			return (this_name);
-		}
-
-		/*
-		 * Skip next entry in the table if this name returns a Package
-		 * (next entry contains the package info)
-		 */
-		if (this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) {
-			this_name++;
-		}
-
-		this_name++;
-	}
-
-	return (NULL);		/* Not found */
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ns_check_object_type
  *
  * PARAMETERS:  data            - Pointer to validation data structure
@@ -410,28 +334,12 @@
 {
 	union acpi_operand_object *return_object = *return_object_ptr;
 	acpi_status status = AE_OK;
-	u32 return_btype;
 	char type_buffer[48];	/* Room for 5 types */
 
-	/*
-	 * If we get a NULL return_object here, it is a NULL package element.
-	 * Since all extraneous NULL package elements were removed earlier by a
-	 * call to acpi_ns_remove_null_elements, this is an unexpected NULL element.
-	 * We will attempt to repair it.
-	 */
-	if (!return_object) {
-		status = acpi_ns_repair_null_element(data, expected_btypes,
-						     package_index,
-						     return_object_ptr);
-		if (ACPI_SUCCESS(status)) {
-			return (AE_OK);	/* Repair was successful */
-		}
-		goto type_error_exit;
-	}
-
 	/* A Namespace node should not get here, but make sure */
 
-	if (ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
+	if (return_object &&
+	    ACPI_GET_DESCRIPTOR_TYPE(return_object) == ACPI_DESC_TYPE_NAMED) {
 		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
 				      "Invalid return type - Found a Namespace node [%4.4s] type %s",
 				      return_object->node.name.ascii,
@@ -448,59 +356,31 @@
 	 * from all of the predefined names (including elements of returned
 	 * packages)
 	 */
-	switch (return_object->common.type) {
-	case ACPI_TYPE_INTEGER:
-		return_btype = ACPI_RTYPE_INTEGER;
-		break;
+	data->return_btype = acpi_ns_get_bitmapped_type(return_object);
+	if (data->return_btype == ACPI_RTYPE_ANY) {
 
-	case ACPI_TYPE_BUFFER:
-		return_btype = ACPI_RTYPE_BUFFER;
-		break;
-
-	case ACPI_TYPE_STRING:
-		return_btype = ACPI_RTYPE_STRING;
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-		return_btype = ACPI_RTYPE_PACKAGE;
-		break;
-
-	case ACPI_TYPE_LOCAL_REFERENCE:
-		return_btype = ACPI_RTYPE_REFERENCE;
-		break;
-
-	default:
 		/* Not one of the supported objects, must be incorrect */
-
 		goto type_error_exit;
 	}
 
-	/* Is the object one of the expected types? */
+	/* For reference objects, check that the reference type is correct */
 
-	if (return_btype & expected_btypes) {
-
-		/* For reference objects, check that the reference type is correct */
-
-		if (return_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
-			status = acpi_ns_check_reference(data, return_object);
-		}
-
+	if ((data->return_btype & expected_btypes) == ACPI_RTYPE_REFERENCE) {
+		status = acpi_ns_check_reference(data, return_object);
 		return (status);
 	}
 
-	/* Type mismatch -- attempt repair of the returned object */
+	/* Attempt simple repair of the returned object if necessary */
 
-	status = acpi_ns_repair_object(data, expected_btypes,
+	status = acpi_ns_simple_repair(data, expected_btypes,
 				       package_index, return_object_ptr);
-	if (ACPI_SUCCESS(status)) {
-		return (AE_OK);	/* Repair was successful */
-	}
+	return (status);
 
       type_error_exit:
 
 	/* Create a string with all expected types for this predefined object */
 
-	acpi_ns_get_expected_types(type_buffer, expected_btypes);
+	acpi_ut_get_expected_return_types(type_buffer, expected_btypes);
 
 	if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
 		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
@@ -558,36 +438,55 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ns_get_expected_types
+ * FUNCTION:    acpi_ns_get_bitmapped_type
  *
- * PARAMETERS:  buffer          - Pointer to where the string is returned
- *              expected_btypes - Bitmap of expected return type(s)
+ * PARAMETERS:  return_object   - Object returned from method/obj evaluation
  *
- * RETURN:      Buffer is populated with type names.
+ * RETURN:      Object return type. ACPI_RTYPE_ANY indicates that the object
+ *              type is not supported. ACPI_RTYPE_NONE indicates that no
+ *              object was returned (return_object is NULL).
  *
- * DESCRIPTION: Translate the expected types bitmap into a string of ascii
- *              names of expected types, for use in warning messages.
+ * DESCRIPTION: Convert object type into a bitmapped object return type.
  *
  ******************************************************************************/
 
-static void acpi_ns_get_expected_types(char *buffer, u32 expected_btypes)
+static u32 acpi_ns_get_bitmapped_type(union acpi_operand_object *return_object)
 {
-	u32 this_rtype;
-	u32 i;
-	u32 j;
+	u32 return_btype;
 
-	j = 1;
-	buffer[0] = 0;
-	this_rtype = ACPI_RTYPE_INTEGER;
-
-	for (i = 0; i < ACPI_NUM_RTYPES; i++) {
-
-		/* If one of the expected types, concatenate the name of this type */
-
-		if (expected_btypes & this_rtype) {
-			ACPI_STRCAT(buffer, &acpi_rtype_names[i][j]);
-			j = 0;	/* Use name separator from now on */
-		}
-		this_rtype <<= 1;	/* Next Rtype */
+	if (!return_object) {
+		return (ACPI_RTYPE_NONE);
 	}
+
+	/* Map acpi_object_type to internal bitmapped type */
+
+	switch (return_object->common.type) {
+	case ACPI_TYPE_INTEGER:
+		return_btype = ACPI_RTYPE_INTEGER;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		return_btype = ACPI_RTYPE_BUFFER;
+		break;
+
+	case ACPI_TYPE_STRING:
+		return_btype = ACPI_RTYPE_STRING;
+		break;
+
+	case ACPI_TYPE_PACKAGE:
+		return_btype = ACPI_RTYPE_PACKAGE;
+		break;
+
+	case ACPI_TYPE_LOCAL_REFERENCE:
+		return_btype = ACPI_RTYPE_REFERENCE;
+		break;
+
+	default:
+		/* Not one of the supported objects, must be incorrect */
+
+		return_btype = ACPI_RTYPE_ANY;
+		break;
+	}
+
+	return (return_btype);
 }
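
The nspredef.c rework replaces the open-coded type switch with acpi_ns_get_bitmapped_type() and moves the bitmap-to-name formatting into acpi_ut_get_expected_return_types(). The removed helper above shows the formatting trick: each name is stored with a leading '/' and that slash is skipped only for the first matching type, so later matches come out "/"-separated. A standalone sketch of that half, using illustrative bit values:

#include <stdio.h>
#include <string.h>

#define RTYPE_INTEGER   0x01
#define RTYPE_STRING    0x02
#define RTYPE_BUFFER    0x04
#define RTYPE_PACKAGE   0x08
#define RTYPE_REFERENCE 0x10
#define NUM_RTYPES      5

/* Leading '/' is skipped for the first name that matches */
static const char *rtype_names[NUM_RTYPES] = {
	"/Integer", "/String", "/Buffer", "/Package", "/Reference",
};

static void expected_types_string(char *buffer, unsigned int expected)
{
	unsigned int this_rtype = RTYPE_INTEGER;
	unsigned int i, skip_slash = 1;

	buffer[0] = 0;
	for (i = 0; i < NUM_RTYPES; i++) {
		if (expected & this_rtype) {
			strcat(buffer, &rtype_names[i][skip_slash]);
			skip_slash = 0;	/* keep '/' as separator from now on */
		}
		this_rtype <<= 1;	/* next bitmapped type */
	}
}

int main(void)
{
	char buffer[48];	/* room for all five type names */

	expected_types_string(buffer, RTYPE_INTEGER | RTYPE_BUFFER);
	printf("%s\n", buffer);	/* prints "Integer/Buffer" */
	return 0;
}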
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index a401554..77cdd53 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -112,9 +112,15 @@
 	elements = return_object->package.elements;
 	count = return_object->package.count;
 
-	/* The package must have at least one element, else invalid */
-
+	/*
+	 * Most packages must have at least one element. The only exception
+	 * is the variable-length package (ACPI_PTYPE1_VAR).
+	 */
 	if (!count) {
+		if (package->ret_info.type == ACPI_PTYPE1_VAR) {
+			return (AE_OK);
+		}
+
 		ACPI_WARN_PREDEFINED((AE_INFO, data->pathname, data->node_flags,
 				      "Return Package has no elements (empty)"));
 
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 9e83335..18f02e4 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -46,6 +46,7 @@
 #include "acnamesp.h"
 #include "acinterp.h"
 #include "acpredef.h"
+#include "amlresrc.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsrepair")
@@ -71,6 +72,11 @@
  * Buffer  -> String
  * Buffer  -> Package of Integers
  * Package -> Package of one Package
+ *
+ * Additional conversions that are available:
+ *  Convert a null return or zero return value to an end_tag descriptor
+ *  Convert an ASCII string to a Unicode buffer
+ *
  * An incorrect standalone object is wrapped with required outer package
  *
  * Additional possible repairs:
@@ -78,21 +84,51 @@
  *
  ******************************************************************************/
 /* Local prototypes */
-static acpi_status
-acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
-			   union acpi_operand_object **return_object);
+static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
+									 acpi_namespace_node
+									 *node,
+									 u32
+									 return_btype,
+									 u32
+									 package_index);
 
-static acpi_status
-acpi_ns_convert_to_string(union acpi_operand_object *original_object,
-			  union acpi_operand_object **return_object);
+/*
+ * Special but simple repairs for some names.
+ *
+ * 2nd argument: Unexpected types that can be repaired
+ */
+static const struct acpi_simple_repair_info acpi_object_repair_info[] = {
+	/* Resource descriptor conversions */
 
-static acpi_status
-acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
-			  union acpi_operand_object **return_object);
+	{"_CRS",
+	 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
+	 ACPI_RTYPE_NONE,
+	 ACPI_NOT_PACKAGE_ELEMENT,
+	 acpi_ns_convert_to_resource},
+	{"_DMA",
+	 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
+	 ACPI_RTYPE_NONE,
+	 ACPI_NOT_PACKAGE_ELEMENT,
+	 acpi_ns_convert_to_resource},
+	{"_PRS",
+	 ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER |
+	 ACPI_RTYPE_NONE,
+	 ACPI_NOT_PACKAGE_ELEMENT,
+	 acpi_ns_convert_to_resource},
+
+	/* Unicode conversions */
+
+	{"_MLS", ACPI_RTYPE_STRING, 1,
+	 acpi_ns_convert_to_unicode},
+	{"_STR", ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER,
+	 ACPI_NOT_PACKAGE_ELEMENT,
+	 acpi_ns_convert_to_unicode},
+	{{0, 0, 0, 0}, 0, 0, NULL}	/* Table terminator */
+};
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ns_repair_object
+ * FUNCTION:    acpi_ns_simple_repair
  *
  * PARAMETERS:  data                - Pointer to validation data structure
  *              expected_btypes     - Object types expected
@@ -110,16 +146,54 @@
  ******************************************************************************/
 
 acpi_status
-acpi_ns_repair_object(struct acpi_predefined_data *data,
+acpi_ns_simple_repair(struct acpi_predefined_data *data,
 		      u32 expected_btypes,
 		      u32 package_index,
 		      union acpi_operand_object **return_object_ptr)
 {
 	union acpi_operand_object *return_object = *return_object_ptr;
-	union acpi_operand_object *new_object;
+	union acpi_operand_object *new_object = NULL;
 	acpi_status status;
+	const struct acpi_simple_repair_info *predefined;
 
-	ACPI_FUNCTION_NAME(ns_repair_object);
+	ACPI_FUNCTION_NAME(ns_simple_repair);
+
+	/*
+	 * Special repairs for certain names that are in the repair table.
+	 * Check if this name is in the list of repairable names.
+	 */
+	predefined = acpi_ns_match_simple_repair(data->node,
+						 data->return_btype,
+						 package_index);
+	if (predefined) {
+		if (!return_object) {
+			ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+					      ACPI_WARN_ALWAYS,
+					      "Missing expected return value"));
+		}
+
+		status =
+		    predefined->object_converter(return_object, &new_object);
+		if (ACPI_FAILURE(status)) {
+
+			/* A fatal error occurred during a conversion */
+
+			ACPI_EXCEPTION((AE_INFO, status,
+					"During return object analysis"));
+			return (status);
+		}
+		if (new_object) {
+			goto object_repaired;
+		}
+	}
+
+	/*
+	 * Do not perform simple object repair unless the return type is not
+	 * expected.
+	 */
+	if (data->return_btype & expected_btypes) {
+		return (AE_OK);
+	}
 
 	/*
 	 * At this point, we know that the type of the returned object was not
@@ -127,6 +201,24 @@
 	 * repair the object by converting it to one of the expected object
 	 * types for this predefined name.
 	 */
+
+	/*
+	 * If there is no return value, check if we require a return value for
+	 * this predefined name. Either one return value is expected, or none,
+	 * for both methods and other objects.
+	 *
+	 * Exit now if there is no return object. Warning if one was expected.
+	 */
+	if (!return_object) {
+		if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
+			ACPI_WARN_PREDEFINED((AE_INFO, data->pathname,
+					      ACPI_WARN_ALWAYS,
+					      "Missing expected return value"));
+
+			return (AE_AML_NO_RETURN_VALUE);
+		}
+	}
+
 	if (expected_btypes & ACPI_RTYPE_INTEGER) {
 		status = acpi_ns_convert_to_integer(return_object, &new_object);
 		if (ACPI_SUCCESS(status)) {
@@ -216,254 +308,51 @@
 	return (AE_OK);
 }
 
-/*******************************************************************************
+/******************************************************************************
  *
- * FUNCTION:    acpi_ns_convert_to_integer
+ * FUNCTION:    acpi_ns_match_simple_repair
  *
- * PARAMETERS:  original_object     - Object to be converted
- *              return_object       - Where the new converted object is returned
+ * PARAMETERS:  node                - Namespace node for the method/object
+ *              return_btype        - Object type that was returned
+ *              package_index       - Index of object within parent package (if
+ *                                    applicable - ACPI_NOT_PACKAGE_ELEMENT
+ *                                    otherwise)
  *
- * RETURN:      Status. AE_OK if conversion was successful.
+ * RETURN:      Pointer to entry in repair table. NULL indicates not found.
  *
- * DESCRIPTION: Attempt to convert a String/Buffer object to an Integer.
+ * DESCRIPTION: Check an object name against the repairable object list.
  *
- ******************************************************************************/
+ *****************************************************************************/
 
-static acpi_status
-acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
-			   union acpi_operand_object **return_object)
+static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
+									 acpi_namespace_node
+									 *node,
+									 u32
+									 return_btype,
+									 u32
+									 package_index)
 {
-	union acpi_operand_object *new_object;
-	acpi_status status;
-	u64 value = 0;
-	u32 i;
+	const struct acpi_simple_repair_info *this_name;
 
-	switch (original_object->common.type) {
-	case ACPI_TYPE_STRING:
+	/* Search info table for a repairable predefined method/object name */
 
-		/* String-to-Integer conversion */
+	this_name = acpi_object_repair_info;
+	while (this_name->object_converter) {
+		if (ACPI_COMPARE_NAME(node->name.ascii, this_name->name)) {
 
-		status = acpi_ut_strtoul64(original_object->string.pointer,
-					   ACPI_ANY_BASE, &value);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-		break;
+			/* Check if we can actually repair this name/type combination */
 
-	case ACPI_TYPE_BUFFER:
-
-		/* Buffer-to-Integer conversion. Max buffer size is 64 bits. */
-
-		if (original_object->buffer.length > 8) {
-			return (AE_AML_OPERAND_TYPE);
-		}
-
-		/* Extract each buffer byte to create the integer */
-
-		for (i = 0; i < original_object->buffer.length; i++) {
-			value |=
-			    ((u64) original_object->buffer.
-			     pointer[i] << (i * 8));
-		}
-		break;
-
-	default:
-		return (AE_AML_OPERAND_TYPE);
-	}
-
-	new_object = acpi_ut_create_integer_object(value);
-	if (!new_object) {
-		return (AE_NO_MEMORY);
-	}
-
-	*return_object = new_object;
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_convert_to_string
- *
- * PARAMETERS:  original_object     - Object to be converted
- *              return_object       - Where the new converted object is returned
- *
- * RETURN:      Status. AE_OK if conversion was successful.
- *
- * DESCRIPTION: Attempt to convert a Integer/Buffer object to a String.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_convert_to_string(union acpi_operand_object *original_object,
-			  union acpi_operand_object **return_object)
-{
-	union acpi_operand_object *new_object;
-	acpi_size length;
-	acpi_status status;
-
-	switch (original_object->common.type) {
-	case ACPI_TYPE_INTEGER:
-		/*
-		 * Integer-to-String conversion. Commonly, convert
-		 * an integer of value 0 to a NULL string. The last element of
-		 * _BIF and _BIX packages occasionally need this fix.
-		 */
-		if (original_object->integer.value == 0) {
-
-			/* Allocate a new NULL string object */
-
-			new_object = acpi_ut_create_string_object(0);
-			if (!new_object) {
-				return (AE_NO_MEMORY);
+			if ((return_btype & this_name->unexpected_btypes) &&
+			    (package_index == this_name->package_index)) {
+				return (this_name);
 			}
-		} else {
-			status =
-			    acpi_ex_convert_to_string(original_object,
-						      &new_object,
-						      ACPI_IMPLICIT_CONVERT_HEX);
-			if (ACPI_FAILURE(status)) {
-				return (status);
-			}
+
+			return (NULL);
 		}
-		break;
-
-	case ACPI_TYPE_BUFFER:
-		/*
-		 * Buffer-to-String conversion. Use a to_string
-		 * conversion, no transform performed on the buffer data. The best
-		 * example of this is the _BIF method, where the string data from
-		 * the battery is often (incorrectly) returned as buffer object(s).
-		 */
-		length = 0;
-		while ((length < original_object->buffer.length) &&
-		       (original_object->buffer.pointer[length])) {
-			length++;
-		}
-
-		/* Allocate a new string object */
-
-		new_object = acpi_ut_create_string_object(length);
-		if (!new_object) {
-			return (AE_NO_MEMORY);
-		}
-
-		/*
-		 * Copy the raw buffer data with no transform. String is already NULL
-		 * terminated at Length+1.
-		 */
-		ACPI_MEMCPY(new_object->string.pointer,
-			    original_object->buffer.pointer, length);
-		break;
-
-	default:
-		return (AE_AML_OPERAND_TYPE);
+		this_name++;
 	}
 
-	*return_object = new_object;
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ns_convert_to_buffer
- *
- * PARAMETERS:  original_object     - Object to be converted
- *              return_object       - Where the new converted object is returned
- *
- * RETURN:      Status. AE_OK if conversion was successful.
- *
- * DESCRIPTION: Attempt to convert a Integer/String/Package object to a Buffer.
- *
- ******************************************************************************/
-
-static acpi_status
-acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
-			  union acpi_operand_object **return_object)
-{
-	union acpi_operand_object *new_object;
-	acpi_status status;
-	union acpi_operand_object **elements;
-	u32 *dword_buffer;
-	u32 count;
-	u32 i;
-
-	switch (original_object->common.type) {
-	case ACPI_TYPE_INTEGER:
-		/*
-		 * Integer-to-Buffer conversion.
-		 * Convert the Integer to a packed-byte buffer. _MAT and other
-		 * objects need this sometimes, if a read has been performed on a
-		 * Field object that is less than or equal to the global integer
-		 * size (32 or 64 bits).
-		 */
-		status =
-		    acpi_ex_convert_to_buffer(original_object, &new_object);
-		if (ACPI_FAILURE(status)) {
-			return (status);
-		}
-		break;
-
-	case ACPI_TYPE_STRING:
-
-		/* String-to-Buffer conversion. Simple data copy */
-
-		new_object =
-		    acpi_ut_create_buffer_object(original_object->string.
-						 length);
-		if (!new_object) {
-			return (AE_NO_MEMORY);
-		}
-
-		ACPI_MEMCPY(new_object->buffer.pointer,
-			    original_object->string.pointer,
-			    original_object->string.length);
-		break;
-
-	case ACPI_TYPE_PACKAGE:
-		/*
-		 * This case is often seen for predefined names that must return a
-		 * Buffer object with multiple DWORD integers within. For example,
-		 * _FDE and _GTM. The Package can be converted to a Buffer.
-		 */
-
-		/* All elements of the Package must be integers */
-
-		elements = original_object->package.elements;
-		count = original_object->package.count;
-
-		for (i = 0; i < count; i++) {
-			if ((!*elements) ||
-			    ((*elements)->common.type != ACPI_TYPE_INTEGER)) {
-				return (AE_AML_OPERAND_TYPE);
-			}
-			elements++;
-		}
-
-		/* Create the new buffer object to replace the Package */
-
-		new_object = acpi_ut_create_buffer_object(ACPI_MUL_4(count));
-		if (!new_object) {
-			return (AE_NO_MEMORY);
-		}
-
-		/* Copy the package elements (integers) to the buffer as DWORDs */
-
-		elements = original_object->package.elements;
-		dword_buffer = ACPI_CAST_PTR(u32, new_object->buffer.pointer);
-
-		for (i = 0; i < count; i++) {
-			*dword_buffer = (u32) (*elements)->integer.value;
-			dword_buffer++;
-			elements++;
-		}
-		break;
-
-	default:
-		return (AE_AML_OPERAND_TYPE);
-	}
-
-	*return_object = new_object;
-	return (AE_OK);
+	return (NULL);		/* Name was not found in the repair table */
 }
 
 /*******************************************************************************
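
The new acpi_ns_match_simple_repair() above is a sentinel-terminated linear scan: the table is walked until an entry with a NULL converter is reached, and a matching name only yields a repair when the returned type and package index also match. A minimal standalone sketch of the same lookup pattern follows; the table contents and field names here are illustrative, not the ACPICA definitions.

/* Standalone sketch of a sentinel-terminated repair-table scan.
 * Names, fields and table contents are illustrative only. */
#include <stdio.h>
#include <string.h>

struct repair_entry {
	const char *name;          /* 4-char predefined name, e.g. "_BIF" */
	unsigned unexpected_types; /* bitmask of returned types this entry can fix */
	int package_index;         /* element index, or -1 for "not a package element" */
};

static const struct repair_entry repair_table[] = {
	{ "_BIF", 0x1, 9 },
	{ "_PSS", 0x2, -1 },
	{ NULL, 0, 0 }             /* sentinel terminates the scan */
};

static const struct repair_entry *match_repair(const char *name,
					       unsigned returned_type,
					       int package_index)
{
	const struct repair_entry *e;

	for (e = repair_table; e->name; e++) {
		if (strncmp(name, e->name, 4))
			continue;

		/* Name matches: repair only the expected type/index combination */
		if ((returned_type & e->unexpected_types) &&
		    package_index == e->package_index)
			return e;

		return NULL;	/* name found but combination not repairable */
	}
	return NULL;		/* name not in the table */
}

int main(void)
{
	printf("%s\n", match_repair("_BIF", 0x1, 9) ?
	       "repairable" : "not repairable");
	return 0;
}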
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index ba4d982..149e9b9 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -66,9 +66,9 @@
 
 /* Local prototypes */
 
-static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct
-								    acpi_namespace_node
-								    *node);
+static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
+								   acpi_namespace_node
+								   *node);
 
 static acpi_status
 acpi_ns_repair_ALR(struct acpi_predefined_data *data,
@@ -175,7 +175,7 @@
 
 	/* Check if this name is in the list of repairable names */
 
-	predefined = acpi_ns_match_repairable_name(node);
+	predefined = acpi_ns_match_complex_repair(node);
 	if (!predefined) {
 		return (validate_status);
 	}
@@ -186,7 +186,7 @@
 
 /******************************************************************************
  *
- * FUNCTION:    acpi_ns_match_repairable_name
+ * FUNCTION:    acpi_ns_match_complex_repair
  *
  * PARAMETERS:  node                - Namespace node for the method/object
  *
@@ -196,9 +196,9 @@
  *
  *****************************************************************************/
 
-static const struct acpi_repair_info *acpi_ns_match_repairable_name(struct
-								    acpi_namespace_node
-								    *node)
+static const struct acpi_repair_info *acpi_ns_match_complex_repair(struct
+								   acpi_namespace_node
+								   *node)
 {
 	const struct acpi_repair_info *this_name;
 
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 686420d..2808586 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -112,10 +112,10 @@
 
 	if (!node) {
 		ACPI_WARNING((AE_INFO, "Null Node parameter"));
-		return_VALUE(ACPI_TYPE_ANY);
+		return_UINT8(ACPI_TYPE_ANY);
 	}
 
-	return_VALUE(node->type);
+	return_UINT8(node->type);
 }
 
 /*******************************************************************************
@@ -140,10 +140,10 @@
 		/* Type code out of range  */
 
 		ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
-		return_VALUE(ACPI_NS_NORMAL);
+		return_UINT32(ACPI_NS_NORMAL);
 	}
 
-	return_VALUE(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
+	return_UINT32(acpi_gbl_ns_properties[type] & ACPI_NS_LOCAL);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index f51308c..9f25a3d 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -108,7 +108,7 @@
 	/* Byte 0 is a special case, either bits [0:3] or [0:5] are used */
 
 	package_length |= (aml[0] & byte_zero_mask);
-	return_VALUE(package_length);
+	return_UINT32(package_length);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 7816d4ee..72077fa 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -202,6 +202,12 @@
 			return_ACPI_STATUS(AE_AML_INVALID_RESOURCE_TYPE);
 		}
 
+		/* Sanity check the length. It must not be zero, or we loop forever */
+
+		if (!resource->length) {
+			return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+		}
+
 		/* Get the base size of the (external stream) resource descriptor */
 
 		total_size = acpi_gbl_aml_resource_sizes[resource->type];
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index cab5144..b5fc0db 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -385,6 +385,14 @@
 			return;
 		}
 
+		/* Sanity check the length. It must not be zero, or we loop forever */
+
+		if (!resource_list->length) {
+			acpi_os_printf
+			    ("Invalid zero length descriptor in resource list\n");
+			return;
+		}
+
 		/* Dump the resource descriptor */
 
 		if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index ee2e206..6053aa1 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -178,6 +178,14 @@
 			return_ACPI_STATUS(AE_BAD_DATA);
 		}
 
+		/* Sanity check the length. It must not be zero, or we loop forever */
+
+		if (!resource->length) {
+			ACPI_ERROR((AE_INFO,
+				    "Invalid zero length descriptor in resource list\n"));
+			return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+		}
+
 		/* Perform the conversion */
 
 		if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 15d6eae..c0e5d2d 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -563,13 +563,19 @@
 
 	while (resource < resource_end) {
 
-		/* Sanity check the resource */
+		/* Sanity check the resource type */
 
 		if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
 			status = AE_AML_INVALID_RESOURCE_TYPE;
 			break;
 		}
 
+		/* Sanity check the length. It must not be zero, or we loop forever */
+
+		if (!resource->length) {
+			return_ACPI_STATUS(AE_AML_BAD_RESOURCE_LENGTH);
+		}
+
 		/* Invoke the user function, abort on any error returned */
 
 		status = user_function(resource, context);
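
The rscalc.c, rsdump.c, rslist.c and rsxface.c hunks above all add the same guard: each walker advances its cursor by the descriptor's own length field, so a corrupted descriptor with a length of zero would never advance and the loop would spin forever. A standalone sketch of that walk-and-guard pattern, using an illustrative descriptor layout rather than the real AML resource format:

/* Standalone sketch of walking variable-length descriptors laid out
 * back to back, bailing out on a zero length instead of looping. */
#include <stdio.h>
#include <stdint.h>

struct descriptor {
	uint8_t type;
	uint8_t length;		/* bytes in this descriptor, including the header */
};

static int walk(const struct descriptor *d, const struct descriptor *end)
{
	while (d < end) {
		if (!d->length)
			return -1;	/* zero length: cursor would never advance */

		printf("type %u, length %u\n", d->type, d->length);
		d = (const struct descriptor *)((const char *)d + d->length);
	}
	return 0;
}

int main(void)
{
	struct descriptor list[] = { { 1, sizeof(struct descriptor) },
				     { 2, sizeof(struct descriptor) } };

	return walk(list, list + 2);
}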
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 74181bf..33b00d2 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -559,8 +559,12 @@
 		/*
 		 * For each extended field, check for length mismatch between the
 		 * legacy length field and the corresponding 64-bit X length field.
+		 * Note: If the legacy length field is > 0xFF bits, ignore this
+		 * check. (GPE registers can be larger than the 64-bit GAS structure
+		 * can accommodate, 0xFF bits).
 		 */
 		if (address64->address &&
+		    (ACPI_MUL_8(length) <= ACPI_UINT8_MAX) &&
 		    (address64->bit_width != ACPI_MUL_8(length))) {
 			ACPI_BIOS_WARNING((AE_INFO,
 					   "32/64X length mismatch in FADT/%s: %u/%u",
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index b35a5e6..ad11162 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Module Name: tbxface - ACPI table oriented external interfaces
+ * Module Name: tbxface - ACPI table-oriented external interfaces
  *
  *****************************************************************************/
 
@@ -80,7 +80,7 @@
  *                                    array is dynamically allocated.
  *              initial_table_count - Size of initial_table_array, in number of
  *                                    struct acpi_table_desc structures
- *              allow_realloc       - Flag to tell Table Manager if resize of
+ *              allow_resize        - Flag to tell Table Manager if resize of
  *                                    pre-allocated array is allowed. Ignored
  *                                    if initial_table_array is NULL.
  *
@@ -107,8 +107,8 @@
 	ACPI_FUNCTION_TRACE(acpi_initialize_tables);
 
 	/*
-	 * Set up the Root Table Array
-	 * Allocate the table array if requested
+	 * Set up the Root Table Array and allocate the table array
+	 * if requested
 	 */
 	if (!initial_table_array) {
 		status = acpi_allocate_root_table(initial_table_count);
@@ -305,9 +305,10 @@
  *              instance            - Which instance (for SSDTs)
  *              out_table           - Where the pointer to the table is returned
  *
- * RETURN:      Status and pointer to table
+ * RETURN:      Status and pointer to the requested table
  *
- * DESCRIPTION: Finds and verifies an ACPI table.
+ * DESCRIPTION: Finds and verifies an ACPI table. Table must be in the
+ *              RSDT/XSDT.
  *
  ******************************************************************************/
 acpi_status
@@ -375,9 +376,10 @@
  * PARAMETERS:  table_index         - Table index
  *              table               - Where the pointer to the table is returned
  *
- * RETURN:      Status and pointer to the table
+ * RETURN:      Status and pointer to the requested table
  *
- * DESCRIPTION: Obtain a table by an index into the global table list.
+ * DESCRIPTION: Obtain a table by an index into the global table list. Also
+ *              used internally.
  *
  ******************************************************************************/
 acpi_status
@@ -432,7 +434,7 @@
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Install table event handler
+ * DESCRIPTION: Install a global table event handler.
  *
  ******************************************************************************/
 acpi_status
@@ -479,7 +481,7 @@
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Remove table event handler
+ * DESCRIPTION: Remove a table event handler
  *
  ******************************************************************************/
 acpi_status acpi_remove_table_handler(acpi_table_handler handler)
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 698b9d3..e0a2e27 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -214,7 +214,7 @@
 
 	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
 	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
-		return_VALUE(0);
+		return_UINT32(0);
 	}
 
 	range_info = acpi_gbl_address_range_list[space_id];
@@ -256,7 +256,7 @@
 		range_info = range_info->next;
 	}
 
-	return_VALUE(overlap_count);
+	return_UINT32(overlap_count);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index e0e8579..a877a96 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -85,7 +85,6 @@
 	/* Populate the cache object and return it */
 
 	ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list));
-	cache->link_offset = 8;
 	cache->list_name = cache_name;
 	cache->object_size = object_size;
 	cache->max_depth = max_depth;
@@ -108,7 +107,7 @@
 
 acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
 {
-	char *next;
+	void *next;
 	acpi_status status;
 
 	ACPI_FUNCTION_ENTRY();
@@ -128,10 +127,7 @@
 
 		/* Delete and unlink one cached state object */
 
-		next = *(ACPI_CAST_INDIRECT_PTR(char,
-						&(((char *)cache->
-						   list_head)[cache->
-							      link_offset])));
+		next = ACPI_GET_DESCRIPTOR_PTR(cache->list_head);
 		ACPI_FREE(cache->list_head);
 
 		cache->list_head = next;
@@ -221,10 +217,7 @@
 
 		/* Put the object at the head of the cache list */
 
-		*(ACPI_CAST_INDIRECT_PTR(char,
-					 &(((char *)object)[cache->
-							    link_offset]))) =
-		    cache->list_head;
+		ACPI_SET_DESCRIPTOR_PTR(object, cache->list_head);
 		cache->list_head = object;
 		cache->current_depth++;
 
@@ -272,10 +265,7 @@
 		/* There is an object available, use it */
 
 		object = cache->list_head;
-		cache->list_head = *(ACPI_CAST_INDIRECT_PTR(char,
-							    &(((char *)
-							       object)[cache->
-								       link_offset])));
+		cache->list_head = ACPI_GET_DESCRIPTOR_PTR(object);
 
 		cache->current_depth--;
 
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 2541de4..29b9302 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -359,19 +359,20 @@
  * FUNCTION:    acpi_ut_update_ref_count
  *
  * PARAMETERS:  object          - Object whose ref count is to be updated
- *              action          - What to do
+ *              action          - What to do (REF_INCREMENT or REF_DECREMENT)
  *
- * RETURN:      New ref count
+ * RETURN:      None. Sets new reference count within the object
  *
- * DESCRIPTION: Modify the ref count and return it.
+ * DESCRIPTION: Modify the reference count for an internal acpi object
  *
  ******************************************************************************/
 
 static void
 acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
 {
-	u16 count;
-	u16 new_count;
+	u16 original_count;
+	u16 new_count = 0;
+	acpi_cpu_flags lock_flags;
 
 	ACPI_FUNCTION_NAME(ut_update_ref_count);
 
@@ -379,76 +380,79 @@
 		return;
 	}
 
-	count = object->common.reference_count;
-	new_count = count;
-
 	/*
-	 * Perform the reference count action (increment, decrement, force delete)
+	 * Always get the reference count lock. Note: Interpreter and/or
+	 * Namespace is not always locked when this function is called.
 	 */
+	lock_flags = acpi_os_acquire_lock(acpi_gbl_reference_count_lock);
+	original_count = object->common.reference_count;
+
+	/* Perform the reference count action (increment, decrement) */
+
 	switch (action) {
 	case REF_INCREMENT:
 
-		new_count++;
+		new_count = original_count + 1;
 		object->common.reference_count = new_count;
+		acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
+
+		/* The current reference count should never be zero here */
+
+		if (!original_count) {
+			ACPI_WARNING((AE_INFO,
+				      "Obj %p, Reference Count was zero before increment\n",
+				      object));
+		}
 
 		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "Obj %p Refs=%X, [Incremented]\n",
-				  object, new_count));
+				  "Obj %p Type %.2X Refs %.2X [Incremented]\n",
+				  object, object->common.type, new_count));
 		break;
 
 	case REF_DECREMENT:
 
-		if (count < 1) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-					  "Obj %p Refs=%X, can't decrement! (Set to 0)\n",
-					  object, new_count));
+		/* The current reference count must be non-zero */
 
-			new_count = 0;
-		} else {
-			new_count--;
-
-			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-					  "Obj %p Refs=%X, [Decremented]\n",
-					  object, new_count));
+		if (original_count) {
+			new_count = original_count - 1;
+			object->common.reference_count = new_count;
 		}
 
-		if (object->common.type == ACPI_TYPE_METHOD) {
-			ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-					  "Method Obj %p Refs=%X, [Decremented]\n",
-					  object, new_count));
+		acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
+
+		if (!original_count) {
+			ACPI_WARNING((AE_INFO,
+				      "Obj %p, Reference Count is already zero, cannot decrement\n",
+				      object));
 		}
 
-		object->common.reference_count = new_count;
+		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+				  "Obj %p Type %.2X Refs %.2X [Decremented]\n",
+				  object, object->common.type, new_count));
+
+		/* Actually delete the object on a reference count of zero */
+
 		if (new_count == 0) {
 			acpi_ut_delete_internal_obj(object);
 		}
 		break;
 
-	case REF_FORCE_DELETE:
-
-		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
-				  "Obj %p Refs=%X, Force delete! (Set to 0)\n",
-				  object, count));
-
-		new_count = 0;
-		object->common.reference_count = new_count;
-		acpi_ut_delete_internal_obj(object);
-		break;
-
 	default:
 
-		ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
-		break;
+		acpi_os_release_lock(acpi_gbl_reference_count_lock, lock_flags);
+		ACPI_ERROR((AE_INFO, "Unknown Reference Count action (0x%X)",
+			    action));
+		return;
 	}
 
 	/*
 	 * Sanity check the reference count, for debug purposes only.
 	 * (A deleted object will have a huge reference count)
 	 */
-	if (count > ACPI_MAX_REFERENCE_COUNT) {
+	if (new_count > ACPI_MAX_REFERENCE_COUNT) {
 		ACPI_WARNING((AE_INFO,
-			      "Large Reference Count (0x%X) in object %p",
-			      count, object));
+			      "Large Reference Count (0x%X) in object %p, Type=0x%.2X",
+			      new_count, object, object->common.type));
 	}
 }
 
@@ -458,8 +462,7 @@
  *
  * PARAMETERS:  object              - Increment ref count for this object
  *                                    and all sub-objects
- *              action              - Either REF_INCREMENT or REF_DECREMENT or
- *                                    REF_FORCE_DELETE
+ *              action              - Either REF_INCREMENT or REF_DECREMENT
  *
  * RETURN:      Status
  *
@@ -714,7 +717,6 @@
 	/*
 	 * Allow a NULL pointer to be passed in, just ignore it. This saves
 	 * each caller from having to check. Also, ignore NS nodes.
-	 *
 	 */
 	if (!object ||
 	    (ACPI_GET_DESCRIPTOR_TYPE(object) == ACPI_DESC_TYPE_NAMED)) {
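
The rewritten acpi_ut_update_ref_count() above serializes the read-modify-write of the reference count with a dedicated lock, since the interpreter and namespace locks are not always held when it runs, and only deletes the object once the count reaches zero. A standalone sketch of that pattern, using a pthread mutex as a stand-in for the ACPICA spinlock and illustrative names throughout:

/* Standalone sketch of a locked reference count; a pthread mutex stands
 * in for the spinlock, and the helper names are illustrative. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	unsigned refcount;
};

static pthread_mutex_t refcount_lock = PTHREAD_MUTEX_INITIALIZER;

static void object_get(struct object *o)
{
	pthread_mutex_lock(&refcount_lock);
	o->refcount++;
	pthread_mutex_unlock(&refcount_lock);
}

static void object_put(struct object *o)
{
	unsigned new_count;

	pthread_mutex_lock(&refcount_lock);
	if (!o->refcount) {			/* already zero: caller bug, warn and bail */
		pthread_mutex_unlock(&refcount_lock);
		fprintf(stderr, "refcount already zero\n");
		return;
	}
	new_count = --o->refcount;
	pthread_mutex_unlock(&refcount_lock);

	if (!new_count)				/* delete outside the lock */
		free(o);
}

int main(void)
{
	struct object *o = calloc(1, sizeof(*o));

	object_get(o);
	object_put(o);		/* frees the object when the count drops to zero */
	return 0;
}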
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index a0ab7c0..b543a14 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -64,7 +64,7 @@
  ******************************************************************************/
 const char *acpi_format_exception(acpi_status status)
 {
-	const char *exception = NULL;
+	const struct acpi_exception_info *exception;
 
 	ACPI_FUNCTION_ENTRY();
 
@@ -76,10 +76,10 @@
 		ACPI_ERROR((AE_INFO,
 			    "Unknown exception code: 0x%8.8X", status));
 
-		exception = "UNKNOWN_STATUS_CODE";
+		return ("UNKNOWN_STATUS_CODE");
 	}
 
-	return (ACPI_CAST_PTR(const char, exception));
+	return (exception->name);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_format_exception)
@@ -97,10 +97,10 @@
  *              an ASCII string.
  *
  ******************************************************************************/
-const char *acpi_ut_validate_exception(acpi_status status)
+const struct acpi_exception_info *acpi_ut_validate_exception(acpi_status status)
 {
 	u32 sub_status;
-	const char *exception = NULL;
+	const struct acpi_exception_info *exception = NULL;
 
 	ACPI_FUNCTION_ENTRY();
 
@@ -113,35 +113,35 @@
 	case AE_CODE_ENVIRONMENTAL:
 
 		if (sub_status <= AE_CODE_ENV_MAX) {
-			exception = acpi_gbl_exception_names_env[sub_status];
+			exception = &acpi_gbl_exception_names_env[sub_status];
 		}
 		break;
 
 	case AE_CODE_PROGRAMMER:
 
 		if (sub_status <= AE_CODE_PGM_MAX) {
-			exception = acpi_gbl_exception_names_pgm[sub_status];
+			exception = &acpi_gbl_exception_names_pgm[sub_status];
 		}
 		break;
 
 	case AE_CODE_ACPI_TABLES:
 
 		if (sub_status <= AE_CODE_TBL_MAX) {
-			exception = acpi_gbl_exception_names_tbl[sub_status];
+			exception = &acpi_gbl_exception_names_tbl[sub_status];
 		}
 		break;
 
 	case AE_CODE_AML:
 
 		if (sub_status <= AE_CODE_AML_MAX) {
-			exception = acpi_gbl_exception_names_aml[sub_status];
+			exception = &acpi_gbl_exception_names_aml[sub_status];
 		}
 		break;
 
 	case AE_CODE_CONTROL:
 
 		if (sub_status <= AE_CODE_CTRL_MAX) {
-			exception = acpi_gbl_exception_names_ctrl[sub_status];
+			exception = &acpi_gbl_exception_names_ctrl[sub_status];
 		}
 		break;
 
@@ -149,5 +149,9 @@
 		break;
 	}
 
-	return (ACPI_CAST_PTR(const char, exception));
+	if (!exception || !exception->name) {
+		return (NULL);
+	}
+
+	return (exception);
 }
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffecf4b..f736448 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -359,6 +359,8 @@
 
 #ifdef ACPI_DISASSEMBLER
 	acpi_gbl_external_list = NULL;
+	acpi_gbl_num_external_methods = 0;
+	acpi_gbl_resolved_external_methods = 0;
 #endif
 
 #ifdef ACPI_DEBUG_OUTPUT
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 22feb99..08c3232 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -81,7 +81,7 @@
 		}
 	}
 
-	/* Create the spinlocks for use at interrupt level */
+	/* Create the spinlocks for use at interrupt level or for speed */
 
 	status = acpi_os_create_lock (&acpi_gbl_gpe_lock);
 	if (ACPI_FAILURE (status)) {
@@ -93,7 +93,13 @@
 		return_ACPI_STATUS (status);
 	}
 
+	status = acpi_os_create_lock(&acpi_gbl_reference_count_lock);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
 	/* Mutex for _OSI support */
+
 	status = acpi_os_create_mutex(&acpi_gbl_osi_mutex);
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
@@ -136,6 +142,7 @@
 
 	acpi_os_delete_lock(acpi_gbl_gpe_lock);
 	acpi_os_delete_lock(acpi_gbl_hardware_lock);
+	acpi_os_delete_lock(acpi_gbl_reference_count_lock);
 
 	/* Delete the reader/writer lock */
 
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 36a7d36..b15aceb 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -108,9 +108,14 @@
 
 acpi_status acpi_ut_initialize_interfaces(void)
 {
+	acpi_status status;
 	u32 i;
 
-	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
+
 	acpi_gbl_supported_interfaces = acpi_default_supported_interfaces;
 
 	/* Link the static list of supported interfaces */
@@ -132,20 +137,24 @@
  *
  * PARAMETERS:  None
  *
- * RETURN:      None
+ * RETURN:      Status
  *
  * DESCRIPTION: Delete all interfaces in the global list. Sets
  *              acpi_gbl_supported_interfaces to NULL.
  *
  ******************************************************************************/
 
-void acpi_ut_interface_terminate(void)
+acpi_status acpi_ut_interface_terminate(void)
 {
+	acpi_status status;
 	struct acpi_interface_info *next_interface;
 
-	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
-	next_interface = acpi_gbl_supported_interfaces;
+	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
 
+	next_interface = acpi_gbl_supported_interfaces;
 	while (next_interface) {
 		acpi_gbl_supported_interfaces = next_interface->next;
 
@@ -160,6 +169,7 @@
 	}
 
 	acpi_os_release_mutex(acpi_gbl_osi_mutex);
+	return (AE_OK);
 }
 
 /*******************************************************************************
@@ -315,6 +325,7 @@
 	union acpi_operand_object *return_desc;
 	struct acpi_interface_info *interface_info;
 	acpi_interface_handler interface_handler;
+	acpi_status status;
 	u32 return_value;
 
 	ACPI_FUNCTION_TRACE(ut_osi_implementation);
@@ -336,7 +347,10 @@
 	/* Default return value is 0, NOT SUPPORTED */
 
 	return_value = 0;
-	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
 
 	/* Lookup the interface in the global _OSI list */
 
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
new file mode 100644
index 0000000..2945947
--- /dev/null
+++ b/drivers/acpi/acpica/utpredef.c
@@ -0,0 +1,399 @@
+/******************************************************************************
+ *
+ * Module Name: utpredef - support functions for predefined names
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2013, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acpredef.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utpredef")
+
+/*
+ * Names for the types that can be returned by the predefined objects.
+ * Used for warning messages. Must be in the same order as the ACPI_RTYPEs
+ */
+static const char *ut_rtype_names[] = {
+	"/Integer",
+	"/String",
+	"/Buffer",
+	"/Package",
+	"/Reference",
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_next_predefined_method
+ *
+ * PARAMETERS:  this_name           - Entry in the predefined method/name table
+ *
+ * RETURN:      Pointer to next entry in predefined table.
+ *
+ * DESCRIPTION: Get the next entry in the predefined method table. Handles the
+ *              cases where a package info entry follows a method name that
+ *              returns a package.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ut_get_next_predefined_method(const union
+								     acpi_predefined_info
+								     *this_name)
+{
+
+	/*
+	 * Skip next entry in the table if this name returns a Package
+	 * (next entry contains the package info)
+	 */
+	if ((this_name->info.expected_btypes & ACPI_RTYPE_PACKAGE) &&
+	    (this_name->info.expected_btypes != ACPI_RTYPE_ALL)) {
+		this_name++;
+	}
+
+	this_name++;
+	return (this_name);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_match_predefined_method
+ *
+ * PARAMETERS:  name                - Name to find
+ *
+ * RETURN:      Pointer to entry in predefined table. NULL indicates not found.
+ *
+ * DESCRIPTION: Check an object name against the predefined object list.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ut_match_predefined_method(char *name)
+{
+	const union acpi_predefined_info *this_name;
+
+	/* Quick check for a predefined name, first character must be underscore */
+
+	if (name[0] != '_') {
+		return (NULL);
+	}
+
+	/* Search info table for a predefined method/object name */
+
+	this_name = acpi_gbl_predefined_methods;
+	while (this_name->info.name[0]) {
+		if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+			return (this_name);
+		}
+
+		this_name = acpi_ut_get_next_predefined_method(this_name);
+	}
+
+	return (NULL);		/* Not found */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_expected_return_types
+ *
+ * PARAMETERS:  buffer              - Where the formatted string is returned
+ *              expected_Btypes     - Bitfield of expected data types
+ *
+ * RETURN:      Formatted string in Buffer.
+ *
+ * DESCRIPTION: Format the expected object types into a printable string.
+ *
+ ******************************************************************************/
+
+void acpi_ut_get_expected_return_types(char *buffer, u32 expected_btypes)
+{
+	u32 this_rtype;
+	u32 i;
+	u32 j;
+
+	j = 1;
+	buffer[0] = 0;
+	this_rtype = ACPI_RTYPE_INTEGER;
+
+	for (i = 0; i < ACPI_NUM_RTYPES; i++) {
+
+		/* If one of the expected types, concatenate the name of this type */
+
+		if (expected_btypes & this_rtype) {
+			ACPI_STRCAT(buffer, &ut_rtype_names[i][j]);
+			j = 0;	/* Use name separator from now on */
+		}
+
+		this_rtype <<= 1;	/* Next Rtype */
+	}
+}
+
+/*******************************************************************************
+ *
+ * The remaining functions are used by iASL and acpi_help only
+ *
+ ******************************************************************************/
+
+#if (defined ACPI_ASL_COMPILER || defined ACPI_HELP_APP)
+#include <stdio.h>
+#include <string.h>
+
+/* Local prototypes */
+
+static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types);
+
+/* Types that can be returned externally by a predefined name */
+
+static const char *ut_external_type_names[] =	/* Indexed by ACPI_TYPE_* */
+{
+	", UNSUPPORTED-TYPE",
+	", Integer",
+	", String",
+	", Buffer",
+	", Package"
+};
+
+/* Bit widths for resource descriptor predefined names */
+
+static const char *ut_resource_type_names[] = {
+	"/1",
+	"/2",
+	"/3",
+	"/8",
+	"/16",
+	"/32",
+	"/64",
+	"/variable",
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_match_resource_name
+ *
+ * PARAMETERS:  name                - Name to find
+ *
+ * RETURN:      Pointer to entry in the resource table. NULL indicates not
+ *              found.
+ *
+ * DESCRIPTION: Check an object name against the predefined resource
+ *              descriptor object list.
+ *
+ ******************************************************************************/
+
+const union acpi_predefined_info *acpi_ut_match_resource_name(char *name)
+{
+	const union acpi_predefined_info *this_name;
+
+	/* Quick check for a predefined name, first character must be underscore */
+
+	if (name[0] != '_') {
+		return (NULL);
+	}
+
+	/* Search info table for a predefined method/object name */
+
+	this_name = acpi_gbl_resource_names;
+	while (this_name->info.name[0]) {
+		if (ACPI_COMPARE_NAME(name, this_name->info.name)) {
+			return (this_name);
+		}
+
+		this_name++;
+	}
+
+	return (NULL);		/* Not found */
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_display_predefined_method
+ *
+ * PARAMETERS:  buffer              - Scratch buffer for this function
+ *              this_name           - Entry in the predefined method/name table
+ *              multi_line          - TRUE if output should be on >1 line
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Display information about a predefined method. Number and
+ *              type of the input arguments, and expected type(s) for the
+ *              return value, if any.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_display_predefined_method(char *buffer,
+				  const union acpi_predefined_info *this_name,
+				  u8 multi_line)
+{
+	u32 arg_count;
+
+	/*
+	 * Get the argument count and the string buffer
+	 * containing all argument types
+	 */
+	arg_count = acpi_ut_get_argument_types(buffer,
+					       this_name->info.argument_list);
+
+	if (multi_line) {
+		printf("      ");
+	}
+
+	printf("%4.4s    Requires %s%u argument%s",
+	       this_name->info.name,
+	       (this_name->info.argument_list & ARG_COUNT_IS_MINIMUM) ?
+	       "(at least) " : "", arg_count, arg_count != 1 ? "s" : "");
+
+	/* Display the types for any arguments */
+
+	if (arg_count > 0) {
+		printf(" (%s)", buffer);
+	}
+
+	if (multi_line) {
+		printf("\n    ");
+	}
+
+	/* Get the return value type(s) allowed */
+
+	if (this_name->info.expected_btypes) {
+		acpi_ut_get_expected_return_types(buffer,
+						  this_name->info.
+						  expected_btypes);
+		printf("  Return value types: %s\n", buffer);
+	} else {
+		printf("  No return value\n");
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_argument_types
+ *
+ * PARAMETERS:  buffer              - Where to return the formatted types
+ *              argument_types      - Types field for this method
+ *
+ * RETURN:      count - the number of arguments required for this method
+ *
+ * DESCRIPTION: Format the required data types for this method (Integer,
+ *              String, Buffer, or Package) and return the required argument
+ *              count.
+ *
+ ******************************************************************************/
+
+static u32 acpi_ut_get_argument_types(char *buffer, u16 argument_types)
+{
+	u16 this_argument_type;
+	u16 sub_index;
+	u16 arg_count;
+	u32 i;
+
+	*buffer = 0;
+	sub_index = 2;
+
+	/* First field in the types list is the count of args to follow */
+
+	arg_count = (argument_types & METHOD_ARG_MASK);
+	argument_types >>= METHOD_ARG_BIT_WIDTH;
+
+	if (arg_count > METHOD_PREDEF_ARGS_MAX) {
+		printf("**** Invalid argument count (%u) "
+		       "in predefined info structure\n", arg_count);
+		return (arg_count);
+	}
+
+	/* Get each argument from the list, convert to ascii, store to buffer */
+
+	for (i = 0; i < arg_count; i++) {
+		this_argument_type = (argument_types & METHOD_ARG_MASK);
+		if (!this_argument_type
+		    || (this_argument_type > METHOD_MAX_ARG_TYPE)) {
+			printf("**** Invalid argument type (%u) "
+			       "in predefined info structure\n",
+			       this_argument_type);
+			return (arg_count);
+		}
+
+		strcat(buffer,
+		       ut_external_type_names[this_argument_type] + sub_index);
+
+		/* Shift to next argument type field */
+
+		argument_types >>= METHOD_ARG_BIT_WIDTH;
+		sub_index = 0;
+	}
+
+	return (arg_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_resource_bit_width
+ *
+ * PARAMETERS:  buffer              - Where the formatted string is returned
+ *              types               - Bitfield of expected data types
+ *
+ * RETURN:      Count of return types. Formatted string in Buffer.
+ *
+ * DESCRIPTION: Format the resource bit widths into a printable string.
+ *
+ ******************************************************************************/
+
+u32 acpi_ut_get_resource_bit_width(char *buffer, u16 types)
+{
+	u32 i;
+	u16 sub_index;
+	u32 found;
+
+	*buffer = 0;
+	sub_index = 1;
+	found = 0;
+
+	for (i = 0; i < NUM_RESOURCE_WIDTHS; i++) {
+		if (types & 1) {
+			strcat(buffer, &(ut_resource_type_names[i][sub_index]));
+			sub_index = 0;
+			found++;
+		}
+
+		types >>= 1;
+	}
+
+	return (found);
+}
+#endif
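
acpi_ut_get_argument_types() above unpacks a small bit-packed word: the low field holds the argument count and each following field holds one argument type, consumed with a mask and a shift. A standalone sketch of that decoding; the 3-bit field width used here is an assumption for illustration, not the METHOD_ARG_BIT_WIDTH constant:

/* Standalone sketch of unpacking a bit-packed argument list; the 3-bit
 * field width is an assumption chosen for the example. */
#include <stdio.h>

#define ARG_BITS	3
#define ARG_MASK	((1u << ARG_BITS) - 1)

int main(void)
{
	/* count = 2, arg 0 = type 1 (Integer), arg 1 = type 3 (Buffer) */
	unsigned packed = 2 | (1u << ARG_BITS) | (3u << (2 * ARG_BITS));
	unsigned count = packed & ARG_MASK;
	unsigned i;

	packed >>= ARG_BITS;			/* skip the count field */
	for (i = 0; i < count; i++) {
		printf("arg %u: type %u\n", i, packed & ARG_MASK);
		packed >>= ARG_BITS;		/* shift to the next argument field */
	}
	return 0;
}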
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 48efb44..6505774 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -287,7 +287,10 @@
 		return (AE_BAD_PARAMETER);
 	}
 
-	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
 
 	/* Check if the interface name is already in the global list */
 
@@ -336,7 +339,10 @@
 		return (AE_BAD_PARAMETER);
 	}
 
-	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
 
 	status = acpi_ut_remove_interface(interface_name);
 
@@ -362,9 +368,12 @@
  ****************************************************************************/
 acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
 {
-	acpi_status status = AE_OK;
+	acpi_status status;
 
-	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+	if (ACPI_FAILURE(status)) {
+		return (status);
+	}
 
 	if (handler && acpi_gbl_interface_handler) {
 		status = AE_ALREADY_EXISTS;
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index c5cd5b5..0cc384b 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -146,7 +146,7 @@
 
 #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat)
 
-inline int acpi_battery_present(struct acpi_battery *battery)
+static inline int acpi_battery_present(struct acpi_battery *battery)
 {
 	return battery->device->status.battery_present;
 }
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 01708a1..292de3c 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -288,13 +288,12 @@
 	}
 out_success:
 	context->ret.length = out_obj->buffer.length;
-	context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
+	context->ret.pointer = kmemdup(out_obj->buffer.pointer,
+				       context->ret.length, GFP_KERNEL);
 	if (!context->ret.pointer) {
 		status =  AE_NO_MEMORY;
 		goto out_kfree;
 	}
-	memcpy(context->ret.pointer, out_obj->buffer.pointer,
-		context->ret.length);
 	status =  AE_OK;
 
 out_kfree:
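
The bus.c hunk folds a kmalloc() plus memcpy() pair into kmemdup(), which allocates and copies in one call and returns NULL on allocation failure. kmemdup() is kernel-only, so the userspace illustration below uses a hypothetical memdup() helper built on malloc()/memcpy():

/* Userspace illustration of the kmemdup() pattern: allocate and copy in
 * one step, returning NULL on allocation failure. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char buf[] = "ACPI buffer contents";
	char *copy = memdup(buf, sizeof(buf));

	if (!copy)
		return 1;
	printf("%s\n", copy);
	free(copy);
	return 0;
}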
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 86c7d54..92a659a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
+#include <acpi/button.h>
 
 #define PREFIX "ACPI: "
 
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 5523ba7..e231516 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -1,12 +1,12 @@
 /*
- * acpi_container.c  - ACPI Generic Container Driver
- * ($Revision: )
+ * container.c  - ACPI Generic Container Driver
  *
  * Copyright (C) 2004 Anil S Keshavamurthy (anil.s.keshavamurthy@intel.com)
  * Copyright (C) 2004 Keiichiro Tokunaga (tokunaga.keiich@jp.fujitsu.com)
  * Copyright (C) 2004 Motoyuki Ito (motoyuki@soft.fujitsu.com)
- * Copyright (C) 2004 Intel Corp.
  * Copyright (C) 2004 FUJITSU LIMITED
+ * Copyright (C) 2004, 2013 Intel Corp.
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -26,14 +26,11 @@
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
 #include <linux/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+
+#include "internal.h"
 
 #define PREFIX "ACPI: "
 
@@ -50,141 +47,20 @@
 static int container_device_attach(struct acpi_device *device,
 				   const struct acpi_device_id *not_used)
 {
-	/*
-	 * FIXME: This is necessary, so that acpi_eject_store() doesn't return
-	 * -ENODEV for containers.
-	 */
+	/* This is necessary for container hotplug to work. */
 	return 1;
 }
 
-static struct acpi_scan_handler container_device_handler = {
+static struct acpi_scan_handler container_handler = {
 	.ids = container_device_ids,
 	.attach = container_device_attach,
+	.hotplug = {
+		.enabled = true,
+		.mode = AHM_CONTAINER,
+	},
 };
 
-static int is_device_present(acpi_handle handle)
-{
-	acpi_handle temp;
-	acpi_status status;
-	unsigned long long sta;
-
-
-	status = acpi_get_handle(handle, "_STA", &temp);
-	if (ACPI_FAILURE(status))
-		return 1;	/* _STA not found, assume device present */
-
-	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-	if (ACPI_FAILURE(status))
-		return 0;	/* Firmware error */
-
-	return ((sta & ACPI_STA_DEVICE_PRESENT) == ACPI_STA_DEVICE_PRESENT);
-}
-
-static void container_notify_cb(acpi_handle handle, u32 type, void *context)
-{
-	struct acpi_device *device = NULL;
-	int result;
-	int present;
-	acpi_status status;
-	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-
-	acpi_scan_lock_acquire();
-
-	switch (type) {
-	case ACPI_NOTIFY_BUS_CHECK:
-		/* Fall through */
-	case ACPI_NOTIFY_DEVICE_CHECK:
-		pr_debug("Container driver received %s event\n",
-		       (type == ACPI_NOTIFY_BUS_CHECK) ?
-		       "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK");
-
-		present = is_device_present(handle);
-		status = acpi_bus_get_device(handle, &device);
-		if (!present) {
-			if (ACPI_SUCCESS(status)) {
-				/* device exist and this is a remove request */
-				device->flags.eject_pending = 1;
-				kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
-				goto out;
-			}
-			break;
-		}
-
-		if (!ACPI_FAILURE(status) || device)
-			break;
-
-		result = acpi_bus_scan(handle);
-		if (result) {
-			acpi_handle_warn(handle, "Failed to add container\n");
-			break;
-		}
-		result = acpi_bus_get_device(handle, &device);
-		if (result) {
-			acpi_handle_warn(handle, "Missing device object\n");
-			break;
-		}
-
-		kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
-		ost_code = ACPI_OST_SC_SUCCESS;
-		break;
-
-	case ACPI_NOTIFY_EJECT_REQUEST:
-		if (!acpi_bus_get_device(handle, &device) && device) {
-			device->flags.eject_pending = 1;
-			kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
-			goto out;
-		}
-		break;
-
-	default:
-		/* non-hotplug event; possibly handled by other handler */
-		goto out;
-	}
-
-	/* Inform firmware that the hotplug operation has completed */
-	(void) acpi_evaluate_hotplug_ost(handle, type, ost_code, NULL);
-
- out:
-	acpi_scan_lock_release();
-}
-
-static bool is_container(acpi_handle handle)
-{
-	struct acpi_device_info *info;
-	bool ret = false;
-
-	if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
-		return false;
-
-	if (info->valid & ACPI_VALID_HID) {
-		const struct acpi_device_id *id;
-
-		for (id = container_device_ids; id->id[0]; id++) {
-			ret = !strcmp((char *)id->id, info->hardware_id.string);
-			if (ret)
-				break;
-		}
-	}
-	kfree(info);
-	return ret;
-}
-
-static acpi_status acpi_container_register_notify_handler(acpi_handle handle,
-							  u32 lvl, void *ctxt,
-							  void **retv)
-{
-	if (is_container(handle))
-		acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
-					    container_notify_cb, NULL);
-
-	return AE_OK;
-}
-
 void __init acpi_container_init(void)
 {
-	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
-			    acpi_container_register_notify_handler, NULL,
-			    NULL, NULL);
-
-	acpi_scan_add_handler(&container_device_handler);
+	acpi_scan_add_handler_with_hotplug(&container_handler, "container");
 }
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index dd314ef..96de787 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -145,27 +145,36 @@
 	}
 
 	/*
-	 * Get the device's power state either directly (via _PSC) or
-	 * indirectly (via power resources).
+	 * Get the device's power state from power resources settings and _PSC,
+	 * if available.
 	 */
-	if (device->power.flags.explicit_get) {
-		unsigned long long psc;
-		acpi_status status = acpi_evaluate_integer(device->handle,
-							   "_PSC", NULL, &psc);
-		if (ACPI_FAILURE(status))
-			return -ENODEV;
-
-		result = psc;
-	}
-	/* The test below covers ACPI_STATE_UNKNOWN too. */
-	if (result <= ACPI_STATE_D2) {
-	  ; /* Do nothing. */
-	} else if (device->power.flags.power_resources) {
+	if (device->power.flags.power_resources) {
 		int error = acpi_power_get_inferred_state(device, &result);
 		if (error)
 			return error;
-	} else if (result == ACPI_STATE_D3_HOT) {
-		result = ACPI_STATE_D3;
+	}
+	if (device->power.flags.explicit_get) {
+		acpi_handle handle = device->handle;
+		unsigned long long psc;
+		acpi_status status;
+
+		status = acpi_evaluate_integer(handle, "_PSC", NULL, &psc);
+		if (ACPI_FAILURE(status))
+			return -ENODEV;
+
+		/*
+		 * The power resources settings may indicate a power state
+		 * shallower than the actual power state of the device.
+		 *
+		 * Moreover, on systems predating ACPI 4.0, if the device
+		 * doesn't depend on any power resources and _PSC returns 3,
+		 * that means "power off".  We need to maintain compatibility
+		 * with those systems.
+		 */
+		if (psc > result && psc < ACPI_STATE_D3_COLD)
+			result = psc;
+		else if (result == ACPI_STATE_UNKNOWN)
+			result = psc > ACPI_STATE_D2 ? ACPI_STATE_D3_COLD : psc;
 	}
 
 	/*
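
The device_pm.c rework above combines two sources of the device power state: the state inferred from power resource settings and the value reported by _PSC, keeping the deeper of the two, and treating a bare _PSC value of 3 with no power resources as D3cold for compatibility with pre-ACPI-4.0 systems. A standalone sketch of just that selection step; the enum and helper name are illustrative, with the ordering mirroring D0..D3cold:

/* Standalone sketch of combining the power-resource-inferred state with
 * the _PSC value; names and numbering are illustrative. */
#include <stdio.h>

enum { D0, D1, D2, D3_HOT, D3_COLD, STATE_UNKNOWN };

static int combine_states(int inferred, int psc)
{
	int result = inferred;

	if (psc > result && psc < D3_COLD)
		result = psc;	/* _PSC reports a deeper state than the resources imply */
	else if (result == STATE_UNKNOWN)
		result = (psc > D2) ? D3_COLD : psc;	/* no resources: _PSC 3 means "off" */

	return result;
}

int main(void)
{
	int s = combine_states(STATE_UNKNOWN, 3);

	printf("combined state index: %d (D3_COLD is %d)\n", s, D3_COLD);
	return 0;
}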
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index f815da8..8d1c010 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -174,9 +174,13 @@
 
 static int acpi_fan_remove(struct acpi_device *device)
 {
-	struct thermal_cooling_device *cdev = acpi_driver_data(device);
+	struct thermal_cooling_device *cdev;
 
-	if (!device || !cdev)
+	if (!device)
+		return -EINVAL;
+
+	cdev = acpi_driver_data(device);
+	if (!cdev)
 		return -EINVAL;
 
 	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 3c94a73..6f1afd9 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,6 +41,17 @@
 #else
 static inline void acpi_container_init(void) {}
 #endif
+#ifdef CONFIG_ACPI_HOTPLUG_MEMORY
+void acpi_memory_hotplug_init(void);
+#else
+static inline void acpi_memory_hotplug_init(void) {}
+#endif
+
+void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
+				    const char *name);
+int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
+				       const char *hotplug_profile_name);
+void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val);
 
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *acpi_debugfs_dir;
@@ -48,6 +59,11 @@
 #else
 static inline void acpi_debugfs_init(void) { return; }
 #endif
+#ifdef CONFIG_X86_INTEL_LPSS
+void acpi_lpss_init(void);
+#else
+static inline void acpi_lpss_init(void) {}
+#endif
 
 /* --------------------------------------------------------------------------
                      Device Node Initialization / Removal
@@ -60,7 +76,7 @@
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 			     int type, unsigned long long sta);
 void acpi_device_add_finalize(struct acpi_device *device);
-void acpi_free_ids(struct acpi_device *device);
+void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
 
 /* --------------------------------------------------------------------------
                                   Power Resource
@@ -131,4 +147,7 @@
   -------------------------------------------------------------------------- */
 struct platform_device;
 
+int acpi_create_platform_device(struct acpi_device *adev,
+				const struct acpi_device_id *id);
+
 #endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 586e7e9..e721863 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -641,7 +641,7 @@
 	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
 	 * works fine.
 	 */
-	memblock_reserve(acpi_tables_addr, acpi_tables_addr + all_tables_size);
+	memblock_reserve(acpi_tables_addr, all_tables_size);
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
 	p = early_ioremap(acpi_tables_addr, all_tables_size);
@@ -1555,7 +1555,7 @@
 	else
 		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
 
-	length = res->end - res->start + 1;
+	length = resource_size(res);
 	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
 		warn = 1;
 	clash = acpi_check_address_range(space_id, res->start, length, warn);
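
The osl.c hunks fix two length calculations: memblock_reserve() takes a base and a size rather than a base and an end address, and resource_size() already yields the inclusive end - start + 1. A standalone sketch of both points, with a hypothetical reserve() helper standing in for memblock_reserve():

/* Standalone sketch of the two length fixes: pass a size where a size is
 * expected, and compute an inclusive-range size as end - start + 1. */
#include <stdio.h>

struct resource { unsigned long start, end; };	/* end is inclusive */

static unsigned long resource_size(const struct resource *r)
{
	return r->end - r->start + 1;
}

static void reserve(unsigned long base, unsigned long size)
{
	printf("reserve [%#lx, %#lx)\n", base, base + size);
}

int main(void)
{
	unsigned long tables_addr = 0x1000, tables_size = 0x200;
	struct resource res = { 0x2000, 0x2fff };

	/* Correct: pass the size. The old code passed base + size as the
	 * second argument, reserving far more memory than intended. */
	reserve(tables_addr, tables_size);
	printf("resource length: %#lx\n", resource_size(&res));
	return 0;
}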
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ab764ed..2652a61 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -354,6 +354,7 @@
 
 	}
 	resource->end.type = ACPI_RESOURCE_TYPE_END_TAG;
+	resource->end.length = sizeof(struct acpi_resource);
 
 	/* Attempt to set the resource */
 	status = acpi_set_current_resources(link->device->handle, &buffer);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ac8688b..1dd6f6c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -169,8 +169,8 @@
 		*control &= OSC_PCI_CONTROL_MASKS;
 		capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
 	} else {
-		/* Run _OSC query for all possible controls. */
-		capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS;
+		/* Run _OSC query only with existing controls. */
+		capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
 	}
 
 	status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 34f5ef1..f962047 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -459,57 +459,79 @@
 	},
 };
 
-static void acpi_power_hide_list(struct acpi_device *adev, int state)
+static struct attribute_group wakeup_attr_group = {
+	.name = "power_resources_wakeup",
+	.attrs = attrs,
+};
+
+static void acpi_power_hide_list(struct acpi_device *adev,
+				 struct list_head *resources,
+				 struct attribute_group *attr_group)
 {
-	struct acpi_device_power_state *ps = &adev->power.states[state];
 	struct acpi_power_resource_entry *entry;
 
-	if (list_empty(&ps->resources))
+	if (list_empty(resources))
 		return;
 
-	list_for_each_entry_reverse(entry, &ps->resources, node) {
+	list_for_each_entry_reverse(entry, resources, node) {
 		struct acpi_device *res_dev = &entry->resource->device;
 
 		sysfs_remove_link_from_group(&adev->dev.kobj,
-					     attr_groups[state].name,
+					     attr_group->name,
 					     dev_name(&res_dev->dev));
 	}
-	sysfs_remove_group(&adev->dev.kobj, &attr_groups[state]);
+	sysfs_remove_group(&adev->dev.kobj, attr_group);
 }
 
-static void acpi_power_expose_list(struct acpi_device *adev, int state)
+static void acpi_power_expose_list(struct acpi_device *adev,
+				   struct list_head *resources,
+				   struct attribute_group *attr_group)
 {
-	struct acpi_device_power_state *ps = &adev->power.states[state];
 	struct acpi_power_resource_entry *entry;
 	int ret;
 
-	if (list_empty(&ps->resources))
+	if (list_empty(resources))
 		return;
 
-	ret = sysfs_create_group(&adev->dev.kobj, &attr_groups[state]);
+	ret = sysfs_create_group(&adev->dev.kobj, attr_group);
 	if (ret)
 		return;
 
-	list_for_each_entry(entry, &ps->resources, node) {
+	list_for_each_entry(entry, resources, node) {
 		struct acpi_device *res_dev = &entry->resource->device;
 
 		ret = sysfs_add_link_to_group(&adev->dev.kobj,
-					      attr_groups[state].name,
+					      attr_group->name,
 					      &res_dev->dev.kobj,
 					      dev_name(&res_dev->dev));
 		if (ret) {
-			acpi_power_hide_list(adev, state);
+			acpi_power_hide_list(adev, resources, attr_group);
 			break;
 		}
 	}
 }
 
+static void acpi_power_expose_hide(struct acpi_device *adev,
+				   struct list_head *resources,
+				   struct attribute_group *attr_group,
+				   bool expose)
+{
+	if (expose)
+		acpi_power_expose_list(adev, resources, attr_group);
+	else
+		acpi_power_hide_list(adev, resources, attr_group);
+}
+
 void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
 {
 	struct acpi_device_power_state *ps;
 	struct acpi_power_resource_entry *entry;
 	int state;
 
+	if (adev->wakeup.flags.valid)
+		acpi_power_expose_hide(adev, &adev->wakeup.resources,
+				       &wakeup_attr_group, add);
+
 	if (!adev->power.flags.power_resources)
 		return;
 
@@ -523,12 +545,10 @@
 			acpi_power_remove_dependent(resource, adev);
 	}
 
-	for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
-		if (add)
-			acpi_power_expose_list(adev, state);
-		else
-			acpi_power_hide_list(adev, state);
-	}
+	for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
+		acpi_power_expose_hide(adev,
+				       &adev->power.states[state].resources,
+				       &attr_groups[state], add);
 }
 
 int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
@@ -824,7 +844,7 @@
 	list_del(&resource->list_node);
 	mutex_unlock(&power_resource_list_lock);
 
-	acpi_free_ids(device);
+	acpi_free_pnp_ids(&device->pnp);
 	kfree(resource);
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ee255c6..f0df2c9 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -918,7 +918,6 @@
 struct cpuidle_driver acpi_idle_driver = {
 	.name =		"acpi_idle",
 	.owner =	THIS_MODULE,
-	.en_core_tk_irqen = 1,
 };
 
 /**
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 641b545..e8e6527 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -218,9 +218,13 @@
 			unsigned long *state)
 {
 	struct acpi_device *device = cdev->devdata;
-	struct acpi_processor *pr = acpi_driver_data(device);
+	struct acpi_processor *pr;
 
-	if (!device || !pr)
+	if (!device)
+		return -EINVAL;
+
+	pr = acpi_driver_data(device);
+	if (!pr)
 		return -EINVAL;
 
 	*state = acpi_processor_max_state(pr);
@@ -232,9 +236,13 @@
 			unsigned long *cur_state)
 {
 	struct acpi_device *device = cdev->devdata;
-	struct acpi_processor *pr = acpi_driver_data(device);
+	struct acpi_processor *pr;
 
-	if (!device || !pr)
+	if (!device)
+		return -EINVAL;
+
+	pr = acpi_driver_data(device);
+	if (!pr)
 		return -EINVAL;
 
 	*cur_state = cpufreq_get_cur_state(pr->id);
@@ -248,11 +256,15 @@
 			unsigned long state)
 {
 	struct acpi_device *device = cdev->devdata;
-	struct acpi_processor *pr = acpi_driver_data(device);
+	struct acpi_processor *pr;
 	int result = 0;
 	int max_pstate;
 
-	if (!device || !pr)
+	if (!device)
+		return -EINVAL;
+
+	pr = acpi_driver_data(device);
+	if (!pr)
 		return -EINVAL;
 
 	max_pstate = cpufreq_get_max_state(pr->id);
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 1d02b7b..e7dd2c1 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -211,9 +211,10 @@
  */
 void acpi_processor_throttling_init(void)
 {
-	if (acpi_processor_update_tsd_coord())
+	if (acpi_processor_update_tsd_coord()) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Assume no T-state coordination\n"));
+	}
 
 	return;
 }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f54d198..fe158fd 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -63,6 +63,19 @@
 	return 0;
 }
 
+int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
+				       const char *hotplug_profile_name)
+{
+	int error;
+
+	error = acpi_scan_add_handler(handler);
+	if (error)
+		return error;
+
+	acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
+	return 0;
+}
+
 /*
  * Creates hid/cid(s) string needed for modalias and uevent
  * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -107,32 +120,20 @@
 }
 static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
 
-/**
- * acpi_bus_hot_remove_device: hot-remove a device and its children
- * @context: struct acpi_eject_event pointer (freed in this func)
- *
- * Hot-remove a device and its children. This function frees up the
- * memory space passed by arg context, so that the caller may call
- * this function asynchronously through acpi_os_hotplug_execute().
- */
-void acpi_bus_hot_remove_device(void *context)
+static int acpi_scan_hot_remove(struct acpi_device *device)
 {
-	struct acpi_eject_event *ej_event = context;
-	struct acpi_device *device = ej_event->device;
 	acpi_handle handle = device->handle;
-	acpi_handle temp;
+	acpi_handle not_used;
 	struct acpi_object_list arg_list;
 	union acpi_object arg;
-	acpi_status status = AE_OK;
-	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; /* default */
-
-	mutex_lock(&acpi_scan_lock);
+	acpi_status status;
+	unsigned long long sta;
 
 	/* If there is no handle, the device node has been unregistered. */
-	if (!device->handle) {
+	if (!handle) {
 		dev_dbg(&device->dev, "ACPI handle missing\n");
 		put_device(&device->dev);
-		goto out;
+		return -EINVAL;
 	}
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -143,7 +144,7 @@
 	put_device(&device->dev);
 	device = NULL;
 
-	if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &temp))) {
+	if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &not_used))) {
 		arg_list.count = 1;
 		arg_list.pointer = &arg;
 		arg.type = ACPI_TYPE_INTEGER;
@@ -161,18 +162,205 @@
 	 */
 	status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
 	if (ACPI_FAILURE(status)) {
-		if (status != AE_NOT_FOUND)
-			acpi_handle_warn(handle, "Eject failed\n");
+		if (status == AE_NOT_FOUND) {
+			return -ENODEV;
+		} else {
+			acpi_handle_warn(handle, "Eject failed (0x%x)\n",
+								status);
+			return -EIO;
+		}
+	}
 
-		/* Tell the firmware the hot-remove operation has failed. */
-		acpi_evaluate_hotplug_ost(handle, ej_event->event,
-					  ost_code, NULL);
+	/*
+	 * Verify that the eject was indeed successful.  If not, log an error
+	 * message.  No need to call _OST since the _EJ0 call succeeded.
+	 */
+	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_warn(handle,
+			"Status check after eject failed (0x%x)\n", status);
+	} else if (sta & ACPI_STA_DEVICE_ENABLED) {
+		acpi_handle_warn(handle,
+			"Eject incomplete - status 0x%llx\n", sta);
+	}
+
+	return 0;
+}
+
+static void acpi_bus_device_eject(void *context)
+{
+	acpi_handle handle = context;
+	struct acpi_device *device = NULL;
+	struct acpi_scan_handler *handler;
+	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+
+	mutex_lock(&acpi_scan_lock);
+
+	acpi_bus_get_device(handle, &device);
+	if (!device)
+		goto err_out;
+
+	handler = device->handler;
+	if (!handler || !handler->hotplug.enabled) {
+		ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
+		goto err_out;
+	}
+	acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
+				  ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
+	if (handler->hotplug.mode == AHM_CONTAINER) {
+		device->flags.eject_pending = true;
+		kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+	} else {
+		int error;
+
+		get_device(&device->dev);
+		error = acpi_scan_hot_remove(device);
+		if (error)
+			goto err_out;
 	}
 
  out:
 	mutex_unlock(&acpi_scan_lock);
-	kfree(context);
 	return;
+
+ err_out:
+	acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST, ost_code,
+				  NULL);
+	goto out;
+}
+
+static void acpi_scan_bus_device_check(acpi_handle handle, u32 ost_source)
+{
+	struct acpi_device *device = NULL;
+	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+	int error;
+
+	mutex_lock(&acpi_scan_lock);
+
+	acpi_bus_get_device(handle, &device);
+	if (device) {
+		dev_warn(&device->dev, "Attempt to re-insert\n");
+		goto out;
+	}
+	acpi_evaluate_hotplug_ost(handle, ost_source,
+				  ACPI_OST_SC_INSERT_IN_PROGRESS, NULL);
+	error = acpi_bus_scan(handle);
+	if (error) {
+		acpi_handle_warn(handle, "Namespace scan failure\n");
+		goto out;
+	}
+	error = acpi_bus_get_device(handle, &device);
+	if (error) {
+		acpi_handle_warn(handle, "Missing device node object\n");
+		goto out;
+	}
+	ost_code = ACPI_OST_SC_SUCCESS;
+	if (device->handler && device->handler->hotplug.mode == AHM_CONTAINER)
+		kobject_uevent(&device->dev.kobj, KOBJ_ONLINE);
+
+ out:
+	acpi_evaluate_hotplug_ost(handle, ost_source, ost_code, NULL);
+	mutex_unlock(&acpi_scan_lock);
+}
+
+static void acpi_scan_bus_check(void *context)
+{
+	acpi_scan_bus_device_check((acpi_handle)context,
+				   ACPI_NOTIFY_BUS_CHECK);
+}
+
+static void acpi_scan_device_check(void *context)
+{
+	acpi_scan_bus_device_check((acpi_handle)context,
+				   ACPI_NOTIFY_DEVICE_CHECK);
+}
+
+static void acpi_hotplug_unsupported(acpi_handle handle, u32 type)
+{
+	u32 ost_status;
+
+	switch (type) {
+	case ACPI_NOTIFY_BUS_CHECK:
+		acpi_handle_debug(handle,
+			"ACPI_NOTIFY_BUS_CHECK event: unsupported\n");
+		ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
+		break;
+	case ACPI_NOTIFY_DEVICE_CHECK:
+		acpi_handle_debug(handle,
+			"ACPI_NOTIFY_DEVICE_CHECK event: unsupported\n");
+		ost_status = ACPI_OST_SC_INSERT_NOT_SUPPORTED;
+		break;
+	case ACPI_NOTIFY_EJECT_REQUEST:
+		acpi_handle_debug(handle,
+			"ACPI_NOTIFY_EJECT_REQUEST event: unsupported\n");
+		ost_status = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
+		break;
+	default:
+		/* non-hotplug event; possibly handled by another handler */
+		return;
+	}
+
+	acpi_evaluate_hotplug_ost(handle, type, ost_status, NULL);
+}
+
+static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
+{
+	acpi_osd_exec_callback callback;
+	struct acpi_scan_handler *handler = data;
+	acpi_status status;
+
+	if (!handler->hotplug.enabled)
+		return acpi_hotplug_unsupported(handle, type);
+
+	switch (type) {
+	case ACPI_NOTIFY_BUS_CHECK:
+		acpi_handle_debug(handle, "ACPI_NOTIFY_BUS_CHECK event\n");
+		callback = acpi_scan_bus_check;
+		break;
+	case ACPI_NOTIFY_DEVICE_CHECK:
+		acpi_handle_debug(handle, "ACPI_NOTIFY_DEVICE_CHECK event\n");
+		callback = acpi_scan_device_check;
+		break;
+	case ACPI_NOTIFY_EJECT_REQUEST:
+		acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
+		callback = acpi_bus_device_eject;
+		break;
+	default:
+		/* non-hotplug event; possibly handled by another handler */
+		return;
+	}
+	status = acpi_os_hotplug_execute(callback, handle);
+	if (ACPI_FAILURE(status))
+		acpi_evaluate_hotplug_ost(handle, type,
+					  ACPI_OST_SC_NON_SPECIFIC_FAILURE,
+					  NULL);
+}
+
+/**
+ * acpi_bus_hot_remove_device: hot-remove a device and its children
+ * @context: struct acpi_eject_event pointer (freed in this func)
+ *
+ * Hot-remove a device and its children.  This function frees the memory
+ * pointed to by @context, so the caller may invoke it asynchronously
+ * through acpi_os_hotplug_execute().
+ */
+void acpi_bus_hot_remove_device(void *context)
+{
+	struct acpi_eject_event *ej_event = context;
+	struct acpi_device *device = ej_event->device;
+	acpi_handle handle = device->handle;
+	int error;
+
+	mutex_lock(&acpi_scan_lock);
+
+	error = acpi_scan_hot_remove(device);
+	if (error && handle)
+		acpi_evaluate_hotplug_ost(handle, ej_event->event,
+					  ACPI_OST_SC_NON_SPECIFIC_FAILURE,
+					  NULL);
+
+	mutex_unlock(&acpi_scan_lock);
+	kfree(context);
 }
 EXPORT_SYMBOL(acpi_bus_hot_remove_device);
 
@@ -206,51 +394,61 @@
 acpi_eject_store(struct device *d, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
-	int ret = count;
-	acpi_status status;
-	acpi_object_type type = 0;
 	struct acpi_device *acpi_device = to_acpi_device(d);
 	struct acpi_eject_event *ej_event;
+	acpi_object_type not_used;
+	acpi_status status;
+	u32 ost_source;
+	int ret;
 
-	if ((!count) || (buf[0] != '1')) {
+	if (!count || buf[0] != '1')
 		return -EINVAL;
-	}
-	if (!acpi_device->driver && !acpi_device->handler) {
-		ret = -ENODEV;
-		goto err;
-	}
-	status = acpi_get_type(acpi_device->handle, &type);
-	if (ACPI_FAILURE(status) || (!acpi_device->flags.ejectable)) {
-		ret = -ENODEV;
-		goto err;
-	}
 
+	if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
+	    && !acpi_device->driver)
+		return -ENODEV;
+
+	status = acpi_get_type(acpi_device->handle, &not_used);
+	if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
+		return -ENODEV;
+
+	mutex_lock(&acpi_scan_lock);
+
+	if (acpi_device->flags.eject_pending) {
+		/* ACPI eject notification event. */
+		ost_source = ACPI_NOTIFY_EJECT_REQUEST;
+		acpi_device->flags.eject_pending = 0;
+	} else {
+		/* Eject initiated by user space. */
+		ost_source = ACPI_OST_EC_OSPM_EJECT;
+	}
 	ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
 	if (!ej_event) {
 		ret = -ENOMEM;
-		goto err;
+		goto err_out;
 	}
-
-	get_device(&acpi_device->dev);
+	acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
+				  ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
 	ej_event->device = acpi_device;
-	if (acpi_device->flags.eject_pending) {
-		/* event originated from ACPI eject notification */
-		ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
-		acpi_device->flags.eject_pending = 0;
-	} else {
-		/* event originated from user */
-		ej_event->event = ACPI_OST_EC_OSPM_EJECT;
-		(void) acpi_evaluate_hotplug_ost(acpi_device->handle,
-			ej_event->event, ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
-	}
-
+	ej_event->event = ost_source;
+	get_device(&acpi_device->dev);
 	status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
 	if (ACPI_FAILURE(status)) {
 		put_device(&acpi_device->dev);
 		kfree(ej_event);
+		ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
+		goto err_out;
 	}
-err:
+	ret = count;
+
+ out:
+	mutex_unlock(&acpi_scan_lock);
 	return ret;
+
+ err_out:
+	acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
+				  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+	goto out;
 }
 
 static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
@@ -376,7 +574,7 @@
 			goto end;
 	}
 
-	if (dev->flags.bus_address)
+	if (dev->pnp.type.bus_address)
 		result = device_create_file(&dev->dev, &dev_attr_adr);
 	if (dev->pnp.unique_id)
 		result = device_create_file(&dev->dev, &dev_attr_uid);
@@ -449,7 +647,7 @@
 
 	if (dev->pnp.unique_id)
 		device_remove_file(&dev->dev, &dev_attr_uid);
-	if (dev->flags.bus_address)
+	if (dev->pnp.type.bus_address)
 		device_remove_file(&dev->dev, &dev_attr_adr);
 	device_remove_file(&dev->dev, &dev_attr_modalias);
 	device_remove_file(&dev->dev, &dev_attr_hid);
@@ -512,17 +710,6 @@
 }
 EXPORT_SYMBOL(acpi_match_device_ids);
 
-void acpi_free_ids(struct acpi_device *device)
-{
-	struct acpi_hardware_id *id, *tmp;
-
-	list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
-		kfree(id->id);
-		kfree(id);
-	}
-	kfree(device->pnp.unique_id);
-}
-
 static void acpi_free_power_resources_lists(struct acpi_device *device)
 {
 	int i;
@@ -543,7 +730,7 @@
 {
 	struct acpi_device *acpi_dev = to_acpi_device(dev);
 
-	acpi_free_ids(acpi_dev);
+	acpi_free_pnp_ids(&acpi_dev->pnp);
 	acpi_free_power_resources_lists(acpi_dev);
 	kfree(acpi_dev);
 }
@@ -1256,19 +1443,17 @@
 }
 
 /*
- * acpi_bay_match - see if a device is an ejectable driver bay
+ * acpi_bay_match - see if an acpi object is an ejectable drive bay
  *
  * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
  * then we can safely call it an ejectable drive bay
  */
-static int acpi_bay_match(struct acpi_device *device){
+static int acpi_bay_match(acpi_handle handle)
+{
 	acpi_status status;
-	acpi_handle handle;
 	acpi_handle tmp;
 	acpi_handle phandle;
 
-	handle = device->handle;
-
 	status = acpi_get_handle(handle, "_EJ0", &tmp);
 	if (ACPI_FAILURE(status))
 		return -ENODEV;
@@ -1292,12 +1477,12 @@
 }
 
 /*
- * acpi_dock_match - see if a device has a _DCK method
+ * acpi_dock_match - see if an acpi object has a _DCK method
  */
-static int acpi_dock_match(struct acpi_device *device)
+static int acpi_dock_match(acpi_handle handle)
 {
 	acpi_handle tmp;
-	return acpi_get_handle(device->handle, "_DCK", &tmp);
+	return acpi_get_handle(handle, "_DCK", &tmp);
 }
 
 const char *acpi_device_hid(struct acpi_device *device)
@@ -1312,7 +1497,7 @@
 }
 EXPORT_SYMBOL(acpi_device_hid);
 
-static void acpi_add_id(struct acpi_device *device, const char *dev_id)
+static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
 {
 	struct acpi_hardware_id *id;
 
@@ -1326,7 +1511,8 @@
 		return;
 	}
 
-	list_add_tail(&id->list, &device->pnp.ids);
+	list_add_tail(&id->list, &pnp->ids);
+	pnp->type.hardware_id = 1;
 }
 
 /*
@@ -1334,7 +1520,7 @@
  * lacks the SMBUS01 HID and the methods do not have the necessary "_"
  * prefix.  Work around this.
  */
-static int acpi_ibm_smbus_match(struct acpi_device *device)
+static int acpi_ibm_smbus_match(acpi_handle handle)
 {
 	acpi_handle h_dummy;
 	struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -1344,7 +1530,7 @@
 		return -ENODEV;
 
 	/* Look for SMBS object */
-	result = acpi_get_name(device->handle, ACPI_SINGLE_NAME, &path);
+	result = acpi_get_name(handle, ACPI_SINGLE_NAME, &path);
 	if (result)
 		return result;
 
@@ -1355,48 +1541,50 @@
 
 	/* Does it have the necessary (but misnamed) methods? */
 	result = -ENODEV;
-	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "SBI", &h_dummy)) &&
-	    ACPI_SUCCESS(acpi_get_handle(device->handle, "SBR", &h_dummy)) &&
-	    ACPI_SUCCESS(acpi_get_handle(device->handle, "SBW", &h_dummy)))
+	if (ACPI_SUCCESS(acpi_get_handle(handle, "SBI", &h_dummy)) &&
+	    ACPI_SUCCESS(acpi_get_handle(handle, "SBR", &h_dummy)) &&
+	    ACPI_SUCCESS(acpi_get_handle(handle, "SBW", &h_dummy)))
 		result = 0;
 out:
 	kfree(path.pointer);
 	return result;
 }
 
-static void acpi_device_set_id(struct acpi_device *device)
+static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
+				int device_type)
 {
 	acpi_status status;
 	struct acpi_device_info *info;
 	struct acpi_pnp_device_id_list *cid_list;
 	int i;
 
-	switch (device->device_type) {
+	switch (device_type) {
 	case ACPI_BUS_TYPE_DEVICE:
-		if (ACPI_IS_ROOT_DEVICE(device)) {
-			acpi_add_id(device, ACPI_SYSTEM_HID);
+		if (handle == ACPI_ROOT_OBJECT) {
+			acpi_add_id(pnp, ACPI_SYSTEM_HID);
 			break;
 		}
 
-		status = acpi_get_object_info(device->handle, &info);
+		status = acpi_get_object_info(handle, &info);
 		if (ACPI_FAILURE(status)) {
-			printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
+			pr_err(PREFIX "%s: Error reading device info\n",
+					__func__);
 			return;
 		}
 
 		if (info->valid & ACPI_VALID_HID)
-			acpi_add_id(device, info->hardware_id.string);
+			acpi_add_id(pnp, info->hardware_id.string);
 		if (info->valid & ACPI_VALID_CID) {
 			cid_list = &info->compatible_id_list;
 			for (i = 0; i < cid_list->count; i++)
-				acpi_add_id(device, cid_list->ids[i].string);
+				acpi_add_id(pnp, cid_list->ids[i].string);
 		}
 		if (info->valid & ACPI_VALID_ADR) {
-			device->pnp.bus_address = info->address;
-			device->flags.bus_address = 1;
+			pnp->bus_address = info->address;
+			pnp->type.bus_address = 1;
 		}
 		if (info->valid & ACPI_VALID_UID)
-			device->pnp.unique_id = kstrdup(info->unique_id.string,
+			pnp->unique_id = kstrdup(info->unique_id.string,
 							GFP_KERNEL);
 
 		kfree(info);
@@ -1405,40 +1593,50 @@
 		 * Some devices don't reliably have _HIDs & _CIDs, so add
 		 * synthetic HIDs to make sure drivers can find them.
 		 */
-		if (acpi_is_video_device(device))
-			acpi_add_id(device, ACPI_VIDEO_HID);
-		else if (ACPI_SUCCESS(acpi_bay_match(device)))
-			acpi_add_id(device, ACPI_BAY_HID);
-		else if (ACPI_SUCCESS(acpi_dock_match(device)))
-			acpi_add_id(device, ACPI_DOCK_HID);
-		else if (!acpi_ibm_smbus_match(device))
-			acpi_add_id(device, ACPI_SMBUS_IBM_HID);
-		else if (list_empty(&device->pnp.ids) &&
-			 ACPI_IS_ROOT_DEVICE(device->parent)) {
-			acpi_add_id(device, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
-			strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
-			strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
+		if (acpi_is_video_device(handle))
+			acpi_add_id(pnp, ACPI_VIDEO_HID);
+		else if (ACPI_SUCCESS(acpi_bay_match(handle)))
+			acpi_add_id(pnp, ACPI_BAY_HID);
+		else if (ACPI_SUCCESS(acpi_dock_match(handle)))
+			acpi_add_id(pnp, ACPI_DOCK_HID);
+		else if (!acpi_ibm_smbus_match(handle))
+			acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
+		else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
+			acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
+			strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
+			strcpy(pnp->device_class, ACPI_BUS_CLASS);
 		}
 
 		break;
 	case ACPI_BUS_TYPE_POWER:
-		acpi_add_id(device, ACPI_POWER_HID);
+		acpi_add_id(pnp, ACPI_POWER_HID);
 		break;
 	case ACPI_BUS_TYPE_PROCESSOR:
-		acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
+		acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
 		break;
 	case ACPI_BUS_TYPE_THERMAL:
-		acpi_add_id(device, ACPI_THERMAL_HID);
+		acpi_add_id(pnp, ACPI_THERMAL_HID);
 		break;
 	case ACPI_BUS_TYPE_POWER_BUTTON:
-		acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
+		acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
 		break;
 	case ACPI_BUS_TYPE_SLEEP_BUTTON:
-		acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
+		acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
 		break;
 	}
 }
 
+void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
+{
+	struct acpi_hardware_id *id, *tmp;
+
+	list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
+		kfree(id->id);
+		kfree(id);
+	}
+	kfree(pnp->unique_id);
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 			     int type, unsigned long long sta)
 {
@@ -1448,7 +1646,7 @@
 	device->parent = acpi_bus_get_parent(handle);
 	STRUCT_TO_INT(device->status) = sta;
 	acpi_device_get_busid(device);
-	acpi_device_set_id(device);
+	acpi_set_pnp_ids(handle, &device->pnp, type);
 	acpi_bus_get_flags(device);
 	device->flags.match_driver = false;
 	device_initialize(&device->dev);
@@ -1536,6 +1734,75 @@
 	return 0;
 }
 
+static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
+				       char *idstr,
+				       const struct acpi_device_id **matchid)
+{
+	const struct acpi_device_id *devid;
+
+	for (devid = handler->ids; devid->id[0]; devid++)
+		if (!strcmp((char *)devid->id, idstr)) {
+			if (matchid)
+				*matchid = devid;
+
+			return true;
+		}
+
+	return false;
+}
+
+static struct acpi_scan_handler *acpi_scan_match_handler(char *idstr,
+					const struct acpi_device_id **matchid)
+{
+	struct acpi_scan_handler *handler;
+
+	list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
+		if (acpi_scan_handler_matching(handler, idstr, matchid))
+			return handler;
+
+	return NULL;
+}
+
+void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
+{
+	if (!!hotplug->enabled == !!val)
+		return;
+
+	mutex_lock(&acpi_scan_lock);
+
+	hotplug->enabled = val;
+
+	mutex_unlock(&acpi_scan_lock);
+}
+
+static void acpi_scan_init_hotplug(acpi_handle handle, int type)
+{
+	struct acpi_device_pnp pnp = {};
+	struct acpi_hardware_id *hwid;
+	struct acpi_scan_handler *handler;
+
+	INIT_LIST_HEAD(&pnp.ids);
+	acpi_set_pnp_ids(handle, &pnp, type);
+
+	if (!pnp.type.hardware_id)
+		return;
+
+	/*
+	 * This relies on the fact that acpi_install_notify_handler() will not
+	 * install the same notify handler routine twice for the same handle.
+	 */
+	list_for_each_entry(hwid, &pnp.ids, list) {
+		handler = acpi_scan_match_handler(hwid->id, NULL);
+		if (handler) {
+			acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+					acpi_hotplug_notify_cb, handler);
+			break;
+		}
+	}
+
+	acpi_free_pnp_ids(&pnp);
+}
+
 static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
 				      void *not_used, void **return_value)
 {
@@ -1558,6 +1825,8 @@
 		return AE_OK;
 	}
 
+	acpi_scan_init_hotplug(handle, type);
+
 	if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
 	    !(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
 		struct acpi_device_wakeup wakeup;
@@ -1583,41 +1852,25 @@
 	return AE_OK;
 }
 
-static int acpi_scan_do_attach_handler(struct acpi_device *device, char *id)
-{
-	struct acpi_scan_handler *handler;
-
-	list_for_each_entry(handler, &acpi_scan_handlers_list, list_node) {
-		const struct acpi_device_id *devid;
-
-		for (devid = handler->ids; devid->id[0]; devid++) {
-			int ret;
-
-			if (strcmp((char *)devid->id, id))
-				continue;
-
-			ret = handler->attach(device, devid);
-			if (ret > 0) {
-				device->handler = handler;
-				return ret;
-			} else if (ret < 0) {
-				return ret;
-			}
-		}
-	}
-	return 0;
-}
-
 static int acpi_scan_attach_handler(struct acpi_device *device)
 {
 	struct acpi_hardware_id *hwid;
 	int ret = 0;
 
 	list_for_each_entry(hwid, &device->pnp.ids, list) {
-		ret = acpi_scan_do_attach_handler(device, hwid->id);
-		if (ret)
-			break;
+		const struct acpi_device_id *devid;
+		struct acpi_scan_handler *handler;
 
+		handler = acpi_scan_match_handler(hwid->id, &devid);
+		if (handler) {
+			ret = handler->attach(device, devid);
+			if (ret > 0) {
+				device->handler = handler;
+				break;
+			} else if (ret < 0) {
+				break;
+			}
+		}
 	}
 	return ret;
 }
@@ -1788,8 +2041,10 @@
 	acpi_pci_root_init();
 	acpi_pci_link_init();
 	acpi_platform_init();
+	acpi_lpss_init();
 	acpi_csrt_init();
 	acpi_container_init();
+	acpi_memory_hotplug_init();
 
 	mutex_lock(&acpi_scan_lock);
 	/*
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 41c0504..fcae5fa 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -7,6 +7,8 @@
 #include <linux/moduleparam.h>
 #include <acpi/acpi_drivers.h>
 
+#include "internal.h"
+
 #define _COMPONENT		ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("sysfs");
 
@@ -249,6 +251,7 @@
 static LIST_HEAD(acpi_table_attr_list);
 static struct kobject *tables_kobj;
 static struct kobject *dynamic_tables_kobj;
+static struct kobject *hotplug_kobj;
 
 struct acpi_table_attr {
 	struct bin_attribute attr;
@@ -716,6 +719,67 @@
 static const struct device_attribute pm_profile_attr =
 	__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
 
+static ssize_t hotplug_enabled_show(struct kobject *kobj,
+				    struct kobj_attribute *attr, char *buf)
+{
+	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
+
+	return sprintf(buf, "%d\n", hotplug->enabled);
+}
+
+static ssize_t hotplug_enabled_store(struct kobject *kobj,
+				     struct kobj_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
+	unsigned int val;
+
+	if (kstrtouint(buf, 10, &val) || val > 1)
+		return -EINVAL;
+
+	acpi_scan_hotplug_enabled(hotplug, val);
+	return size;
+}
+
+static struct kobj_attribute hotplug_enabled_attr =
+	__ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
+		hotplug_enabled_store);
+
+static struct attribute *hotplug_profile_attrs[] = {
+	&hotplug_enabled_attr.attr,
+	NULL
+};
+
+static struct kobj_type acpi_hotplug_profile_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = hotplug_profile_attrs,
+};
+
+void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
+				    const char *name)
+{
+	int error;
+
+	if (!hotplug_kobj)
+		goto err_out;
+
+	kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
+	error = kobject_set_name(&hotplug->kobj, "%s", name);
+	if (error)
+		goto err_out;
+
+	hotplug->kobj.parent = hotplug_kobj;
+	error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+	if (error)
+		goto err_out;
+
+	kobject_uevent(&hotplug->kobj, KOBJ_ADD);
+	return;
+
+ err_out:
+	pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
+}
+
 int __init acpi_sysfs_init(void)
 {
 	int result;
@@ -723,6 +787,8 @@
 	result = acpi_tables_sysfs_init();
 	if (result)
 		return result;
+
+	hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
 	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
 	return result;
 }
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8470771..a33821c 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -723,9 +723,19 @@
 		return -EINVAL;
 
 	if (type == THERMAL_TRIP_ACTIVE) {
-		/* aggressive active cooling */
-		*trend = THERMAL_TREND_RAISING;
-		return 0;
+		unsigned long trip_temp;
+		unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature,
+							tz->kelvin_offset);
+		if (thermal_get_trip_temp(thermal, trip, &trip_temp))
+			return -EINVAL;
+
+		if (temp > trip_temp) {
+			*trend = THERMAL_TREND_RAISING;
+			return 0;
+		} else {
+			/* Fall back on default trend */
+			return -EINVAL;
+		}
 	}
 
 	/*
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 313f959..c3932d0 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -167,7 +167,8 @@
 	u8 dvi:1;
 	u8 bios:1;
 	u8 unknown:1;
-	u8 reserved:2;
+	u8 notify:1;
+	u8 reserved:1;
 };
 
 struct acpi_video_device_cap {
@@ -222,7 +223,7 @@
 			int level);
 static int acpi_video_device_lcd_get_level_current(
 			struct acpi_video_device *device,
-			unsigned long long *level, int init);
+			unsigned long long *level, bool raw);
 static int acpi_video_get_next_level(struct acpi_video_device *device,
 				     u32 level_current, u32 event);
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
@@ -236,7 +237,7 @@
 	struct acpi_video_device *vd =
 		(struct acpi_video_device *)bl_get_data(bd);
 
-	if (acpi_video_device_lcd_get_level_current(vd, &cur_level, 0))
+	if (acpi_video_device_lcd_get_level_current(vd, &cur_level, false))
 		return -EINVAL;
 	for (i = 2; i < vd->brightness->count; i++) {
 		if (vd->brightness->levels[i] == cur_level)
@@ -281,7 +282,7 @@
 	unsigned long long level;
 	int offset;
 
-	if (acpi_video_device_lcd_get_level_current(video, &level, 0))
+	if (acpi_video_device_lcd_get_level_current(video, &level, false))
 		return -EINVAL;
 	for (offset = 2; offset < video->brightness->count; offset++)
 		if (level == video->brightness->levels[offset]) {
@@ -447,12 +448,45 @@
 		DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"),
 		},
 	},
+	{
+	 .callback = video_ignore_initial_backlight,
+	 .ident = "HP Pavilion dm4",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
+		},
+	},
 	{}
 };
 
+static unsigned long long
+acpi_video_bqc_value_to_level(struct acpi_video_device *device,
+			      unsigned long long bqc_value)
+{
+	unsigned long long level;
+
+	if (device->brightness->flags._BQC_use_index) {
+		/*
+		 * _BQC returns an index that doesn't account for the
+		 * first 2 special-purpose entries in the levels list,
+		 * so we need to compensate for that offset ourselves.
+		 */
+		if (device->brightness->flags._BCL_reversed)
+			bqc_value = device->brightness->count - 3 - bqc_value;
+
+		level = device->brightness->levels[bqc_value + 2];
+	} else {
+		level = bqc_value;
+	}
+
+	level += bqc_offset_aml_bug_workaround;
+
+	return level;
+}
+
 static int
 acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
-					unsigned long long *level, int init)
+					unsigned long long *level, bool raw)
 {
 	acpi_status status = AE_OK;
 	int i;
@@ -463,29 +497,30 @@
 		status = acpi_evaluate_integer(device->dev->handle, buf,
 						NULL, level);
 		if (ACPI_SUCCESS(status)) {
-			if (device->brightness->flags._BQC_use_index) {
-				if (device->brightness->flags._BCL_reversed)
-					*level = device->brightness->count
-								 - 3 - (*level);
-				*level = device->brightness->levels[*level + 2];
-
+			if (raw) {
+				/*
+				 * The caller wants the raw value
+				 * returned by _BQC, so don't modify
+				 * it any further.
+				 */
+				return 0;
 			}
-			*level += bqc_offset_aml_bug_workaround;
+
+			*level = acpi_video_bqc_value_to_level(device, *level);
+
 			for (i = 2; i < device->brightness->count; i++)
 				if (device->brightness->levels[i] == *level) {
 					device->brightness->curr = *level;
 					return 0;
 			}
-			if (!init) {
-				/*
-				 * BQC returned an invalid level.
-				 * Stop using it.
-				 */
-				ACPI_WARNING((AE_INFO,
-					      "%s returned an invalid level",
-					      buf));
-				device->cap._BQC = device->cap._BCQ = 0;
-			}
+			/*
+			 * BQC returned an invalid level.
+			 * Stop using it.
+			 */
+			ACPI_WARNING((AE_INFO,
+				      "%s returned an invalid level",
+				      buf));
+			device->cap._BQC = device->cap._BCQ = 0;
 		} else {
 			/* Fixme:
 			 * should we return an error or ignore this failure?
@@ -598,6 +633,56 @@
 }
 
 /*
+ * Decides whether _BQC/_BCQ is usable on this system.
+ *
+ * We do this by changing the brightness level and then reading back the
+ * current level.  If the two do not match, check whether _BQC is returning
+ * an index; if it is not, clear the _BQC/_BCQ capability.
+ */
+static int acpi_video_bqc_quirk(struct acpi_video_device *device,
+				int max_level, int current_level)
+{
+	struct acpi_video_device_brightness *br = device->brightness;
+	int result;
+	unsigned long long level;
+	int test_level;
+
+	/* don't mess with existing known broken systems */
+	if (bqc_offset_aml_bug_workaround)
+		return 0;
+
+	/*
+	 * Some systems always report the current brightness level as the
+	 * maximum through _BQC, so we need to test another value for them.
+	 */
+	test_level = current_level == max_level ? br->levels[2] : max_level;
+
+	result = acpi_video_device_lcd_set_level(device, test_level);
+	if (result)
+		return result;
+
+	result = acpi_video_device_lcd_get_level_current(device, &level, true);
+	if (result)
+		return result;
+
+	if (level != test_level) {
+		/* buggy _BQC found; find out whether it uses an index */
+		if (level < br->count) {
+			if (br->flags._BCL_reversed)
+				level = br->count - 3 - level;
+			if (br->levels[level + 2] == test_level)
+				br->flags._BQC_use_index = 1;
+		}
+
+		if (!br->flags._BQC_use_index)
+			device->cap._BQC = device->cap._BCQ = 0;
+	}
+
+	return 0;
+}
+
+
+/*
  *  Arg:	
  *  	device	: video output device (LCD, CRT, ..)
  *
@@ -703,41 +788,35 @@
 	if (!device->cap._BQC)
 		goto set_level;
 
-	result = acpi_video_device_lcd_get_level_current(device, &level_old, 1);
+	result = acpi_video_device_lcd_get_level_current(device,
+							 &level_old, true);
 	if (result)
 		goto out_free_levels;
 
+	result = acpi_video_bqc_quirk(device, max_level, level_old);
+	if (result)
+		goto out_free_levels;
 	/*
-	 * Set the level to maximum and check if _BQC uses indexed value
+	 * cap._BQC may have been cleared because _BQC was found to be broken
+	 * in acpi_video_bqc_quirk(), so check again here.
 	 */
-	result = acpi_video_device_lcd_set_level(device, max_level);
-	if (result)
-		goto out_free_levels;
-
-	result = acpi_video_device_lcd_get_level_current(device, &level, 0);
-	if (result)
-		goto out_free_levels;
-
-	br->flags._BQC_use_index = (level == max_level ? 0 : 1);
-
-	if (!br->flags._BQC_use_index) {
-		/*
-		 * Set the backlight to the initial state.
-		 * On some buggy laptops, _BQC returns an uninitialized value
-		 * when invoked for the first time, i.e. level_old is invalid.
-		 * set the backlight to max_level in this case
-		 */
-		if (use_bios_initial_backlight) {
-			for (i = 2; i < br->count; i++)
-				if (level_old == br->levels[i])
-					level = level_old;
-		}
+	if (!device->cap._BQC)
 		goto set_level;
-	}
 
-	if (br->flags._BCL_reversed)
-		level_old = (br->count - 1) - level_old;
-	level = br->levels[level_old];
+	if (use_bios_initial_backlight) {
+		level = acpi_video_bqc_value_to_level(device, level_old);
+		/*
+		 * On some buggy laptops, _BQC returns an uninitialized
+		 * value when invoked for the first time, i.e.
+		 * level_old is invalid (no matter whether it's a level
+		 * or an index). Set the backlight to max_level in this case.
+		 */
+		for (i = 2; i < br->count; i++)
+			if (level_old == br->levels[i])
+				break;
+		if (i == br->count)
+			level = max_level;
+	}
 
 set_level:
 	result = acpi_video_device_lcd_set_level(device, level);
@@ -996,53 +1075,51 @@
 	struct acpi_video_device *data;
 	struct acpi_video_device_attrib* attribute;
 
-	if (!device || !video)
-		return -EINVAL;
-
 	status =
 	    acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
-	if (ACPI_SUCCESS(status)) {
+	/* Some devices omit _ADR; skip them instead of failing */
+	if (ACPI_FAILURE(status))
+		return 0;
 
-		data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
-		if (!data)
-			return -ENOMEM;
+	data = kzalloc(sizeof(struct acpi_video_device), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
 
-		strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
-		strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
-		device->driver_data = data;
+	strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
+	strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
+	device->driver_data = data;
 
-		data->device_id = device_id;
-		data->video = video;
-		data->dev = device;
+	data->device_id = device_id;
+	data->video = video;
+	data->dev = device;
 
-		attribute = acpi_video_get_device_attr(video, device_id);
+	attribute = acpi_video_get_device_attr(video, device_id);
 
-		if((attribute != NULL) && attribute->device_id_scheme) {
-			switch (attribute->display_type) {
-			case ACPI_VIDEO_DISPLAY_CRT:
-				data->flags.crt = 1;
-				break;
-			case ACPI_VIDEO_DISPLAY_TV:
-				data->flags.tvout = 1;
-				break;
-			case ACPI_VIDEO_DISPLAY_DVI:
-				data->flags.dvi = 1;
-				break;
-			case ACPI_VIDEO_DISPLAY_LCD:
-				data->flags.lcd = 1;
-				break;
-			default:
-				data->flags.unknown = 1;
-				break;
-			}
-			if(attribute->bios_can_detect)
-				data->flags.bios = 1;
-		} else {
-			/* Check for legacy IDs */
-			device_type = acpi_video_get_device_type(video,
-								 device_id);
-			/* Ignore bits 16 and 18-20 */
-			switch (device_type & 0xffe2ffff) {
+	if((attribute != NULL) && attribute->device_id_scheme) {
+		switch (attribute->display_type) {
+		case ACPI_VIDEO_DISPLAY_CRT:
+			data->flags.crt = 1;
+			break;
+		case ACPI_VIDEO_DISPLAY_TV:
+			data->flags.tvout = 1;
+			break;
+		case ACPI_VIDEO_DISPLAY_DVI:
+			data->flags.dvi = 1;
+			break;
+		case ACPI_VIDEO_DISPLAY_LCD:
+			data->flags.lcd = 1;
+			break;
+		default:
+			data->flags.unknown = 1;
+			break;
+		}
+		if(attribute->bios_can_detect)
+			data->flags.bios = 1;
+	} else {
+		/* Check for legacy IDs */
+		device_type = acpi_video_get_device_type(video, device_id);
+		/* Ignore bits 16 and 18-20 */
+		switch (device_type & 0xffe2ffff) {
 			case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
 				data->flags.crt = 1;
 				break;
@@ -1054,34 +1131,24 @@
 				break;
 			default:
 				data->flags.unknown = 1;
-			}
 		}
-
-		acpi_video_device_bind(video, data);
-		acpi_video_device_find_cap(data);
-
-		status = acpi_install_notify_handler(device->handle,
-						     ACPI_DEVICE_NOTIFY,
-						     acpi_video_device_notify,
-						     data);
-		if (ACPI_FAILURE(status)) {
-			printk(KERN_ERR PREFIX
-					  "Error installing notify handler\n");
-			if(data->brightness)
-				kfree(data->brightness->levels);
-			kfree(data->brightness);
-			kfree(data);
-			return -ENODEV;
-		}
-
-		mutex_lock(&video->device_list_lock);
-		list_add_tail(&data->entry, &video->video_device_list);
-		mutex_unlock(&video->device_list_lock);
-
-		return 0;
 	}
 
-	return -ENOENT;
+	acpi_video_device_bind(video, data);
+	acpi_video_device_find_cap(data);
+
+	status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+					     acpi_video_device_notify, data);
+	if (ACPI_FAILURE(status))
+		dev_err(&device->dev, "Error installing notify handler\n");
+	else
+		data->flags.notify = 1;
+
+	mutex_lock(&video->device_list_lock);
+	list_add_tail(&data->entry, &video->video_device_list);
+	mutex_unlock(&video->device_list_lock);
+
+	return status;
 }
 
 /*
@@ -1268,7 +1335,8 @@
 		goto out;
 
 	result = acpi_video_device_lcd_get_level_current(device,
-							 &level_current, 0);
+							 &level_current,
+							 false);
 	if (result)
 		goto out;
 
@@ -1373,9 +1441,8 @@
 
 		status = acpi_video_bus_get_one_device(dev, video);
 		if (status) {
-			printk(KERN_WARNING PREFIX
-					"Can't attach device\n");
-			continue;
+			dev_err(&dev->dev, "Can't attach device\n");
+			break;
 		}
 	}
 	return status;
@@ -1388,13 +1455,14 @@
 	if (!device || !device->video)
 		return -ENOENT;
 
-	status = acpi_remove_notify_handler(device->dev->handle,
-					    ACPI_DEVICE_NOTIFY,
-					    acpi_video_device_notify);
-	if (ACPI_FAILURE(status)) {
-		printk(KERN_WARNING PREFIX
-		       "Can't remove video notify handler\n");
+	if (device->flags.notify) {
+		status = acpi_remove_notify_handler(device->dev->handle,
+				ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
+		if (ACPI_FAILURE(status))
+			dev_err(&device->dev->dev,
+					"Can't remove video notify handler\n");
 	}
+
 	if (device->backlight) {
 		backlight_device_unregister(device->backlight);
 		device->backlight = NULL;
@@ -1676,7 +1744,7 @@
 
 	error = acpi_video_bus_get_devices(video, device);
 	if (error)
-		goto err_free_video;
+		goto err_put_video;
 
 	video->input = input = input_allocate_device();
 	if (!input) {
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 4ac2593..66f6762 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -67,40 +67,37 @@
 	return 0;
 }
 
-/* Returns true if the device is a video device which can be handled by
- * video.ko.
+/* Returns true if the ACPI object is a video device which can be
+ * handled by video.ko.
  * The device will get a Linux specific CID added in scan.c to
  * identify the device as an ACPI graphics device
  * Be aware that the graphics device may not be physically present
  * Use acpi_video_get_capabilities() to detect general ACPI video
  * capabilities of present cards
  */
-long acpi_is_video_device(struct acpi_device *device)
+long acpi_is_video_device(acpi_handle handle)
 {
 	acpi_handle h_dummy;
 	long video_caps = 0;
 
-	if (!device)
-		return 0;
-
 	/* Is this device able to support video switching ? */
-	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
-	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
+	if (ACPI_SUCCESS(acpi_get_handle(handle, "_DOD", &h_dummy)) ||
+	    ACPI_SUCCESS(acpi_get_handle(handle, "_DOS", &h_dummy)))
 		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
 
 	/* Is this device able to retrieve a video ROM ? */
-	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
+	if (ACPI_SUCCESS(acpi_get_handle(handle, "_ROM", &h_dummy)))
 		video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
 
 	/* Is this device able to configure which video head to be POSTed ? */
-	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) &&
-	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) &&
-	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy)))
+	if (ACPI_SUCCESS(acpi_get_handle(handle, "_VPO", &h_dummy)) &&
+	    ACPI_SUCCESS(acpi_get_handle(handle, "_GPD", &h_dummy)) &&
+	    ACPI_SUCCESS(acpi_get_handle(handle, "_SPD", &h_dummy)))
 		video_caps |= ACPI_VIDEO_DEVICE_POSTING;
 
 	/* Only check for backlight functionality if one of the above hit. */
 	if (video_caps)
-		acpi_walk_namespace(ACPI_TYPE_DEVICE, device->handle,
+		acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
 				    ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL,
 				    &video_caps, NULL);
 
@@ -127,7 +124,7 @@
 		if (!dev)
 			return AE_OK;
 		pci_dev_put(dev);
-		*cap |= acpi_is_video_device(acpi_dev);
+		*cap |= acpi_is_video_device(handle);
 	}
 	return AE_OK;
 }
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 8a52dab..87f2f39 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -17,7 +17,6 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
-#include <linux/pm_qos.h>
 #include <scsi/scsi_device.h>
 #include "libata.h"
 
@@ -61,7 +60,8 @@
 	if (ap->flags & ATA_FLAG_ACPI_SATA)
 		return NULL;
 
-	return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no);
+	return ap->scsi_host ?
+		DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev) : NULL;
 }
 EXPORT_SYMBOL(ata_ap_acpi_handle);
 
@@ -77,7 +77,7 @@
 	acpi_integer adr;
 	struct ata_port *ap = dev->link->ap;
 
-	if (dev->flags & ATA_DFLAG_ACPI_DISABLED)
+	if (libata_noacpi || dev->flags & ATA_DFLAG_ACPI_DISABLED)
 		return NULL;
 
 	if (ap->flags & ATA_FLAG_ACPI_SATA) {
@@ -240,28 +240,15 @@
 	}
 }
 
-/**
- * ata_acpi_gtm - execute _GTM
- * @ap: target ATA port
- * @gtm: out parameter for _GTM result
- *
- * Evaluate _GTM and store the result in @gtm.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
- */
-int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
+static int __ata_acpi_gtm(struct ata_port *ap, acpi_handle handle,
+			  struct ata_acpi_gtm *gtm)
 {
 	struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
 	union acpi_object *out_obj;
 	acpi_status status;
 	int rc = 0;
 
-	status = acpi_evaluate_object(ata_ap_acpi_handle(ap), "_GTM", NULL,
-				      &output);
+	status = acpi_evaluate_object(handle, "_GTM", NULL, &output);
 
 	rc = -ENOENT;
 	if (status == AE_NOT_FOUND)
@@ -295,6 +282,27 @@
 	return rc;
 }
 
+/**
+ * ata_acpi_gtm - execute _GTM
+ * @ap: target ATA port
+ * @gtm: out parameter for _GTM result
+ *
+ * Evaluate _GTM and store the result in @gtm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
+ */
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
+{
+	if (ata_ap_acpi_handle(ap))
+		return __ata_acpi_gtm(ap, ata_ap_acpi_handle(ap), gtm);
+	else
+		return -EINVAL;
+}
+
 EXPORT_SYMBOL_GPL(ata_acpi_gtm);
 
 /**
@@ -1020,38 +1028,6 @@
 	ata_acpi_clear_gtf(dev);
 }
 
-static void ata_acpi_register_power_resource(struct ata_device *dev)
-{
-	struct scsi_device *sdev = dev->sdev;
-	acpi_handle handle;
-
-	handle = ata_dev_acpi_handle(dev);
-	if (handle)
-		acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev);
-}
-
-static void ata_acpi_unregister_power_resource(struct ata_device *dev)
-{
-	struct scsi_device *sdev = dev->sdev;
-	acpi_handle handle;
-
-	handle = ata_dev_acpi_handle(dev);
-	if (handle)
-		acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
-}
-
-void ata_acpi_bind(struct ata_device *dev)
-{
-	ata_acpi_register_power_resource(dev);
-	if (zpodd_dev_enabled(dev))
-		dev_pm_qos_expose_flags(&dev->sdev->sdev_gendev, 0);
-}
-
-void ata_acpi_unbind(struct ata_device *dev)
-{
-	ata_acpi_unregister_power_resource(dev);
-}
-
 static int compat_pci_ata(struct ata_port *ap)
 {
 	struct device *dev = ap->tdev.parent;
@@ -1071,7 +1047,7 @@
 
 static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
 {
-	if (ap->flags & ATA_FLAG_ACPI_SATA)
+	if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA)
 		return -ENODEV;
 
 	*handle = acpi_get_child(DEVICE_ACPI_HANDLE(ap->tdev.parent),
@@ -1080,7 +1056,7 @@
 	if (!*handle)
 		return -ENODEV;
 
-	if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+	if (__ata_acpi_gtm(ap, *handle, &ap->__acpi_init_gtm) == 0)
 		ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
 
 	return 0;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ff44787..dd310b27 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -49,6 +49,7 @@
 #include <linux/hdreg.h>
 #include <linux/uaccess.h>
 #include <linux/suspend.h>
+#include <linux/pm_qos.h>
 #include <asm/unaligned.h>
 
 #include "libata.h"
@@ -2126,7 +2127,6 @@
 	memcpy(&rbuf[8], "linux   ", 8);
 	memcpy(&rbuf[16], "libata          ", 16);
 	memcpy(&rbuf[32], DRV_VERSION, 4);
-	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
 
 	/* we don't store the ATA device signature, so we fake it */
 
@@ -3668,7 +3668,9 @@
 			if (!IS_ERR(sdev)) {
 				dev->sdev = sdev;
 				scsi_device_put(sdev);
-				ata_acpi_bind(dev);
+				if (zpodd_dev_enabled(dev))
+					dev_pm_qos_expose_flags(
+							&sdev->sdev_gendev, 0);
 			} else {
 				dev->sdev = NULL;
 			}
@@ -3767,7 +3769,6 @@
 
 	if (zpodd_dev_enabled(dev))
 		zpodd_exit(dev);
-	ata_acpi_unbind(dev);
 
 	/* clearing dev->sdev is protected by host lock */
 	sdev = dev->sdev;
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 36f189c..8d493b4 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -393,18 +393,7 @@
 	},
 };
 
-static int __init pata_at32_init(void)
-{
-	return platform_driver_probe(&pata_at32_driver, pata_at32_probe);
-}
-
-static void __exit pata_at32_exit(void)
-{
-	platform_driver_unregister(&pata_at32_driver);
-}
-
-module_init(pata_at32_init);
-module_exit(pata_at32_exit);
+module_platform_driver_probe(pata_at32_driver, pata_at32_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("AVR32 SMC/CFC PATA Driver");
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4084944..aa3d166 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -37,7 +37,7 @@
 struct pata_imx_priv {
 	struct clk *clk;
 	/* timings/interrupt/control regs */
-	u8 *host_regs;
+	void __iomem *host_regs;
 	u32 ata_ctl;
 };
 
@@ -98,6 +98,7 @@
 	struct pata_imx_priv *priv;
 	int irq = 0;
 	struct resource *io_res;
+	int ret;
 
 	io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (io_res == NULL)
@@ -112,7 +113,7 @@
 	if (!priv)
 		return -ENOMEM;
 
-	priv->clk = clk_get(&pdev->dev, NULL);
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(priv->clk)) {
 		dev_err(&pdev->dev, "Failed to get clock\n");
 		return PTR_ERR(priv->clk);
@@ -121,8 +122,10 @@
 	clk_prepare_enable(priv->clk);
 
 	host = ata_host_alloc(&pdev->dev, 1);
-	if (!host)
-		goto free_priv;
+	if (!host) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
 	host->private_data = priv;
 	ap = host->ports[0];
@@ -135,7 +138,8 @@
 		resource_size(io_res));
 	if (!priv->host_regs) {
 		dev_err(&pdev->dev, "failed to map IO/CTL base\n");
-		goto free_priv;
+		ret = -EBUSY;
+		goto err;
 	}
 
 	ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
@@ -158,13 +162,17 @@
 			priv->host_regs + PATA_IMX_ATA_INT_EN);
 
 	/* activate */
-	return ata_host_activate(host, irq, ata_sff_interrupt, 0,
+	ret = ata_host_activate(host, irq, ata_sff_interrupt, 0,
 				&pata_imx_sht);
 
-free_priv:
+	if (ret)
+		goto err;
+
+	return 0;
+err:
 	clk_disable_unprepare(priv->clk);
-	clk_put(priv->clk);
-	return -ENOMEM;
+
+	return ret;
 }
 
 static int pata_imx_remove(struct platform_device *pdev)
@@ -177,7 +185,6 @@
 	__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
 
 	clk_disable_unprepare(priv->clk);
-	clk_put(priv->clk);
 
 	return 0;
 }
@@ -223,11 +230,20 @@
 };
 #endif
 
+static const struct of_device_id imx_pata_dt_ids[] = {
+	{
+		.compatible = "fsl,imx27-pata",
+	}, {
+		/* sentinel */
+	}
+};
+
 static struct platform_driver pata_imx_driver = {
 	.probe		= pata_imx_probe,
 	.remove		= pata_imx_remove,
 	.driver = {
 		.name		= DRV_NAME,
+		.of_match_table	= imx_pata_dt_ids,
 		.owner		= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm		= &pata_imx_pm_ops,
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 4fe9d21..be81642 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -542,7 +542,7 @@
 	u8 sysclk;
 
 	/* Get the clock */
-	sysclk = opti_syscfg(0xAC) & 0xC0;	/* BIOS set */
+	sysclk = (opti_syscfg(0xAC) & 0xC0) >> 6;	/* BIOS set */
 
 	/* Enter configuration mode */
 	ioread16(ap->ioaddr.error_addr);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index ff2e57f..e73bef3 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -926,7 +926,7 @@
 			goto free_cf_port;
 		}
 		cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
-					   res_cs1->end - res_cs1->start + 1);
+					   resource_size(res_cs1));
 
 		if (!cs1)
 			goto free_cf_port;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index dfc6fd0..d40e403 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -285,6 +285,7 @@
 	int irq;
 	int data_snoop;
 	struct device_attribute intr_coalescing;
+	struct device_attribute rx_watermark;
 };
 
 static void fsl_sata_set_irq_coalescing(struct ata_host *host,
@@ -343,6 +344,48 @@
 	return strlen(buf);
 }
 
+static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	unsigned int rx_watermark;
+	unsigned long flags;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *csr_base = host_priv->csr_base;
+
+	spin_lock_irqsave(&host->lock, flags);
+	rx_watermark = ioread32(csr_base + TRANSCFG);
+	rx_watermark &= 0x1f;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+	return sprintf(buf, "%d\n", rx_watermark);
+}
+
+static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	unsigned int rx_watermark;
+	unsigned long flags;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+	void __iomem *csr_base = host_priv->csr_base;
+	u32 temp;
+
+	if (sscanf(buf, "%d", &rx_watermark) != 1) {
+		printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+	temp = ioread32(csr_base + TRANSCFG);
+	temp &= 0xffffffe0;
+	iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
+
+	spin_unlock_irqrestore(&host->lock, flags);
+	return strlen(buf);
+}
+
 static inline unsigned int sata_fsl_tag(unsigned int tag,
 					void __iomem *hcr_base)
 {
@@ -1500,6 +1543,17 @@
 	if (retval)
 		goto error_exit_with_cleanup;
 
+	host_priv->rx_watermark.show = fsl_sata_rx_watermark_show;
+	host_priv->rx_watermark.store = fsl_sata_rx_watermark_store;
+	sysfs_attr_init(&host_priv->rx_watermark.attr);
+	host_priv->rx_watermark.attr.name = "rx_watermark";
+	host_priv->rx_watermark.attr.mode = S_IRUGO | S_IWUSR;
+	retval = device_create_file(host->dev, &host_priv->rx_watermark);
+	if (retval) {
+		device_remove_file(&ofdev->dev, &host_priv->intr_coalescing);
+		goto error_exit_with_cleanup;
+	}
+
 	return 0;
 
 error_exit_with_cleanup:
@@ -1522,6 +1576,7 @@
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 
 	device_remove_file(&ofdev->dev, &host_priv->intr_coalescing);
+	device_remove_file(&ofdev->dev, &host_priv->rx_watermark);
 
 	ata_host_detach(host);
 
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 5dba77c..b20aa96 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -251,7 +251,7 @@
 };
 
 static struct scsi_host_template ahci_highbank_platform_sht = {
-	AHCI_SHT("highbank-ahci"),
+	AHCI_SHT("sata_highbank"),
 };
 
 static const struct of_device_id ahci_of_match[] = {
@@ -418,7 +418,7 @@
 }
 #endif
 
-SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
+static SIMPLE_DEV_PM_OPS(ahci_highbank_pm_ops,
 		  ahci_highbank_suspend, ahci_highbank_resume);
 
 static struct platform_driver ahci_highbank_driver = {
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index caf33f6..4799868 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -17,6 +17,7 @@
 #include <linux/libata.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/err.h>
 
 #define DRV_NAME "sata_rcar"
 
@@ -799,9 +800,9 @@
 
 	host->private_data = priv;
 
-	priv->base = devm_request_and_ioremap(&pdev->dev, mem);
-	if (!priv->base) {
-		ret = -EADDRNOTAVAIL;
+	priv->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(priv->base)) {
+		ret = PTR_ERR(priv->base);
 		goto cleanup;
 	}
 
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 9a6b05a..7072404 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -920,7 +920,7 @@
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
-		pm_runtime_put_sync(dev);
+		pm_runtime_put(dev);
 		return -EBUSY;
 	}
 
@@ -961,7 +961,7 @@
 		pm_runtime_enable(dev);
 	}
 
-	pm_runtime_put_sync(dev);
+	pm_runtime_put(dev);
 	return ret;
 }
 
@@ -1327,7 +1327,7 @@
 		pm_generic_complete(dev);
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
-		pm_runtime_idle(dev);
+		pm_request_idle(dev);
 	}
 }
 
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index d03d290..bfd898b 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -324,6 +324,6 @@
 	 * Let runtime PM try to suspend devices that haven't been in use before
 	 * going into the system-wide sleep state we're resuming from.
 	 */
-	pm_runtime_idle(dev);
+	pm_request_idle(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 15beb50..5a9b656 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -756,7 +756,7 @@
 
 	device_unlock(dev);
 
-	pm_runtime_put_sync(dev);
+	pm_runtime_put(dev);
 }
 
 /**
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 32ee0fc..f0077cb 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -55,6 +55,7 @@
  * @rate:	Frequency in hertz
  * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
  * @dev_opp:	points back to the device_opp struct this opp belongs to
+ * @head:	RCU callback head used for deferred freeing
  *
  * This structure stores the OPP information for a given device.
  */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1244930..ef13ad08 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1400,5 +1400,5 @@
 	if (dev->power.runtime_status == RPM_ACTIVE)
 		pm_runtime_set_suspended(dev);
 	if (dev->power.irq_safe && dev->parent)
-		pm_runtime_put_sync(dev->parent);
+		pm_runtime_put(dev->parent);
 }
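All of the pm_runtime_put_sync() to pm_runtime_put() conversions in the hunks above make the same trade: the asynchronous form drops the usage count and merely queues the idle check on the PM workqueue, instead of potentially running the subsystem's runtime callbacks in the caller's context. A minimal illustration (the surrounding function is hypothetical):

static void foo_release_device(struct device *dev)
{
	/*
	 * pm_runtime_put_sync(dev) may invoke ->runtime_idle() or
	 * ->runtime_suspend() right here; pm_runtime_put(dev) defers that
	 * work to pm_wq, so this path never blocks on a power transition.
	 */
	pm_runtime_put(dev);
}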
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 0d2e82f..7c3b3dc 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1337,7 +1337,7 @@
 {
 	struct tpm_chip *chip = dev_get_drvdata(dev);
 	struct tpm_cmd_t cmd;
-	int rc;
+	int rc, try;
 
 	u8 dummy_hash[TPM_DIGEST_SIZE] = { 0 };
 
@@ -1355,9 +1355,32 @@
 	}
 
 	/* now do the actual savestate */
-	cmd.header.in = savestate_header;
-	rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE,
-			  "sending savestate before suspend");
+	for (try = 0; try < TPM_RETRY; try++) {
+		cmd.header.in = savestate_header;
+		rc = transmit_cmd(chip, &cmd, SAVESTATE_RESULT_SIZE, NULL);
+
+		/*
+		 * If the TPM indicates that it is too busy to respond to
+		 * this command then retry before giving up.  It can take
+		 * several seconds for this TPM to be ready.
+		 *
+		 * This can happen if the TPM has already been sent the
+		 * SaveState command before the driver has loaded.  TCG 1.2
+		 * specification states that any communication after SaveState
+		 * may cause the TPM to invalidate previously saved state.
+		 */
+		if (rc != TPM_WARN_RETRY)
+			break;
+		msleep(TPM_TIMEOUT_RETRY);
+	}
+
+	if (rc)
+		dev_err(chip->dev,
+			"Error (%d) sending savestate before suspend\n", rc);
+	else if (try > 0)
+		dev_warn(chip->dev, "TPM savestate took %dms\n",
+			 try * TPM_TIMEOUT_RETRY);
+
 	return rc;
 }
 EXPORT_SYMBOL_GPL(tpm_pm_suspend);
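The retry budget comes from the constants added to tpm.h just below: at most TPM_RETRY (50) attempts spaced TPM_TIMEOUT_RETRY (100 ms) apart, i.e. roughly five seconds in the worst case. Stripped of the TPM specifics, the loop follows the usual bounded-retry shape (all names here are hypothetical stand-ins):

	int rc, try;

	for (try = 0; try < MAX_TRIES; try++) {	/* MAX_TRIES ~ TPM_RETRY */
		rc = send_request();		/* stand-in for transmit_cmd() */
		if (rc != RC_BUSY)		/* stand-in for TPM_WARN_RETRY */
			break;			/* success or a hard error */
		msleep(RETRY_DELAY_MS);		/* stand-in for TPM_TIMEOUT_RETRY */
	}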
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 81b5201..0770d1d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -32,10 +32,12 @@
 	TPM_MINOR = 224,	/* officially assigned */
 	TPM_BUFSIZE = 4096,
 	TPM_NUM_DEVICES = 256,
+	TPM_RETRY = 50,		/* 5 seconds */
 };
 
 enum tpm_timeout {
 	TPM_TIMEOUT = 5,	/* msecs */
+	TPM_TIMEOUT_RETRY = 100 /* msecs */
 };
 
 /* TPM addresses */
@@ -44,6 +46,7 @@
 	TPM_ADDR = 0x4E,
 };
 
+#define TPM_WARN_RETRY          0x800
 #define TPM_WARN_DOING_SELFTEST 0x802
 #define TPM_ERR_DEACTIVATED     0x6
 #define TPM_ERR_DISABLED        0x7
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 8fe7ac3..37d5dcc 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 Infineon Technologies
+ * Copyright (C) 2012,2013 Infineon Technologies
  *
  * Authors:
  * Peter Huewe <peter.huewe@infineon.com>
@@ -56,13 +56,21 @@
 #define TPM_TIMEOUT_US_HI  (TPM_TIMEOUT_US_LOW + 2000)
 
 /* expected value for DIDVID register */
-#define TPM_TIS_I2C_DID_VID 0x000b15d1L
+#define TPM_TIS_I2C_DID_VID_9635 0xd1150b00L
+#define TPM_TIS_I2C_DID_VID_9645 0x001a15d1L
+
+enum i2c_chip_type {
+	SLB9635,
+	SLB9645,
+	UNKNOWN,
+};
 
 /* Structure to store I2C TPM specific stuff */
 struct tpm_inf_dev {
 	struct i2c_client *client;
 	u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
 	struct tpm_chip *chip;
+	enum i2c_chip_type chip_type;
 };
 
 static struct tpm_inf_dev tpm_dev;
@@ -90,10 +98,20 @@
 static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
 {
 
-	struct i2c_msg msg1 = { tpm_dev.client->addr, 0, 1, &addr };
-	struct i2c_msg msg2 = { tpm_dev.client->addr, I2C_M_RD, len, buffer };
+	struct i2c_msg msg1 = {
+		.addr = tpm_dev.client->addr,
+		.len = 1,
+		.buf = &addr
+	};
+	struct i2c_msg msg2 = {
+		.addr = tpm_dev.client->addr,
+		.flags = I2C_M_RD,
+		.len = len,
+		.buf = buffer
+	};
+	struct i2c_msg msgs[] = {msg1, msg2};
 
-	int rc;
+	int rc = 0;
 	int count;
 
 	/* Lock the adapter for the duration of the whole sequence. */
@@ -101,30 +119,53 @@
 		return -EOPNOTSUPP;
 	i2c_lock_adapter(tpm_dev.client->adapter);
 
-	for (count = 0; count < MAX_COUNT; count++) {
-		rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
-		if (rc > 0)
-			break;	/* break here to skip sleep */
+	if (tpm_dev.chip_type == SLB9645) {
+		/* use a combined read for newer chips
+		 * unfortunately the smbus functions are not suitable due to
+		 * the 32 byte limit of the smbus.
+		 * retries should usually not be needed, but are kept just to
+		 * be on the safe side.
+		 */
+		for (count = 0; count < MAX_COUNT; count++) {
+			rc = __i2c_transfer(tpm_dev.client->adapter, msgs, 2);
+			if (rc > 0)
+				break;	/* break here to skip sleep */
+			usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+		}
+	} else {
+		/* slb9635 protocol should work in all cases */
+		for (count = 0; count < MAX_COUNT; count++) {
+			rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
+			if (rc > 0)
+				break;	/* break here to skip sleep */
 
-		usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
-	}
+			usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+		}
 
-	if (rc <= 0)
-		goto out;
+		if (rc <= 0)
+			goto out;
 
-	/* After the TPM has successfully received the register address it needs
-	 * some time, thus we're sleeping here again, before retrieving the data
-	 */
-	for (count = 0; count < MAX_COUNT; count++) {
-		usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
-		rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1);
-		if (rc > 0)
-			break;
-
+		/* After the TPM has successfully received the register address
+		 * it needs some time, thus we're sleeping here again, before
+		 * retrieving the data
+		 */
+		for (count = 0; count < MAX_COUNT; count++) {
+			usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+			rc = __i2c_transfer(tpm_dev.client->adapter, &msg2, 1);
+			if (rc > 0)
+				break;
+		}
 	}
 
 out:
 	i2c_unlock_adapter(tpm_dev.client->adapter);
+	/* take care of 'guard time' */
+	usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+	/* __i2c_transfer returns the number of successfully transferred
+	 * messages.
+	 * So rc should be greater than 0 here otherwise we have an error.
+	 */
 	if (rc <= 0)
 		return -EIO;
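For the SLB9645 path the two messages are handed to __i2c_transfer() in a single call while the adapter lock is held explicitly. A driver that does not need to keep the adapter locked across a larger sequence would normally use the locking wrapper instead; a self-contained sketch of such a combined register read (not part of this patch) is:

/* write the register address, then read len bytes, as one locked transaction */
static int example_reg_read(struct i2c_client *client, u8 reg, u8 *buf, u16 len)
{
	struct i2c_msg msgs[2] = {
		{ .addr = client->addr, .flags = 0,        .len = 1,   .buf = &reg },
		{ .addr = client->addr, .flags = I2C_M_RD, .len = len, .buf = buf  },
	};
	int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	if (ret < 0)
		return ret;
	return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}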
 
@@ -138,7 +179,11 @@
 	int rc = -EIO;
 	int count;
 
-	struct i2c_msg msg1 = { tpm_dev.client->addr, 0, len + 1, tpm_dev.buf };
+	struct i2c_msg msg1 = {
+		.addr = tpm_dev.client->addr,
+		.len = len + 1,
+		.buf = tpm_dev.buf
+	};
 
 	if (len > TPM_BUFSIZE)
 		return -EINVAL;
@@ -154,16 +199,24 @@
 	/*
 	 * NOTE: We have to use these special mechanisms here and unfortunately
 	 * cannot rely on the standard behavior of i2c_transfer.
+	 * Even for newer chips the smbus functions are not
+	 * suitable due to the 32 byte limit of the smbus.
 	 */
 	for (count = 0; count < max_count; count++) {
 		rc = __i2c_transfer(tpm_dev.client->adapter, &msg1, 1);
 		if (rc > 0)
 			break;
-
 		usleep_range(sleep_low, sleep_hi);
 	}
 
 	i2c_unlock_adapter(tpm_dev.client->adapter);
+	/* take care of 'guard time' */
+	usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
+
+	/* __i2c_transfer returns the number of successfully transferred
+	 * messages.
+	 * So rc should be greater than 0 here otherwise we have an error.
+	 */
 	if (rc <= 0)
 		return -EIO;
 
@@ -283,11 +336,18 @@
 static u8 tpm_tis_i2c_status(struct tpm_chip *chip)
 {
 	/* NOTE: since I2C read may fail, return 0 in this case --> time-out */
-	u8 buf;
-	if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0)
-		return 0;
-	else
-		return buf;
+	u8 buf = 0xFF;
+	u8 i = 0;
+
+	do {
+		if (iic_tpm_read(TPM_STS(chip->vendor.locality), &buf, 1) < 0)
+			return 0;
+
+		i++;
+	/* if locality is set, STS should not be 0xFF */
+	} while ((buf == 0xFF) && i < 10);
+
+	return buf;
 }
 
 static void tpm_tis_i2c_ready(struct tpm_chip *chip)
@@ -328,7 +388,7 @@
 
 	/* check current status */
 	*status = tpm_tis_i2c_status(chip);
-	if ((*status & mask) == mask)
+	if ((*status != 0xFF) && (*status & mask) == mask)
 		return 0;
 
 	stop = jiffies + timeout;
@@ -372,7 +432,6 @@
 		/* avoid endless loop in case of broken HW */
 		if (retries > MAX_COUNT_LONG)
 			return -EIO;
-
 	}
 	return size;
 }
@@ -480,7 +539,6 @@
 			rc = -EIO;
 			goto out_err;
 		}
-
 	}
 
 	/* write last byte */
@@ -568,6 +626,7 @@
 
 	chip = tpm_register_hardware(dev, &tpm_tis_i2c);
 	if (!chip) {
+		dev_err(dev, "could not register hardware\n");
 		rc = -ENODEV;
 		goto out_err;
 	}
@@ -582,20 +641,24 @@
 	chip->vendor.timeout_d = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
 
 	if (request_locality(chip, 0) != 0) {
+		dev_err(dev, "could not request locality\n");
 		rc = -ENODEV;
 		goto out_vendor;
 	}
 
 	/* read four bytes from DID_VID register */
 	if (iic_tpm_read(TPM_DID_VID(0), (u8 *)&vendor, 4) < 0) {
+		dev_err(dev, "could not read vendor id\n");
 		rc = -EIO;
 		goto out_release;
 	}
 
-	/* create DID_VID register value, after swapping to little-endian */
-	vendor = be32_to_cpu((__be32) vendor);
-
-	if (vendor != TPM_TIS_I2C_DID_VID) {
+	if (vendor == TPM_TIS_I2C_DID_VID_9645) {
+		tpm_dev.chip_type = SLB9645;
+	} else if (vendor == TPM_TIS_I2C_DID_VID_9635) {
+		tpm_dev.chip_type = SLB9635;
+	} else {
+		dev_err(dev, "vendor id did not match! ID was %08x\n", vendor);
 		rc = -ENODEV;
 		goto out_release;
 	}
@@ -631,22 +694,53 @@
 
 static const struct i2c_device_id tpm_tis_i2c_table[] = {
 	{"tpm_i2c_infineon", 0},
+	{"slb9635tt", 0},
+	{"slb9645tt", 1},
 	{},
 };
 
 MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_table);
+
+#ifdef CONFIG_OF
+static const struct of_device_id tpm_tis_i2c_of_match[] = {
+	{
+		.name = "tpm_i2c_infineon",
+		.type = "tpm",
+		.compatible = "infineon,tpm_i2c_infineon",
+		.data = (void *)0
+	},
+	{
+		.name = "slb9635tt",
+		.type = "tpm",
+		.compatible = "infineon,slb9635tt",
+		.data = (void *)0
+	},
+	{
+		.name = "slb9645tt",
+		.type = "tpm",
+		.compatible = "infineon,slb9645tt",
+		.data = (void *)1
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
+#endif
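The .data values in both ID tables (0 for the SLB9635, 1 for the SLB9645) follow the usual convention of attaching a per-compatible constant, although the hunks above show the driver ultimately confirming the chip type from the DID_VID register. A generic way to consume such a value (sketch only, not taken from this patch) would be:

#include <linux/of_device.h>

static int example_get_match_data(struct device *dev)
{
	/* only meaningful when CONFIG_OF is set and the device came from DT */
	const struct of_device_id *of_id =
		of_match_device(tpm_tis_i2c_of_match, dev);

	return of_id ? (int)(unsigned long)of_id->data : -1;
}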
+
 static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
 
 static int tpm_tis_i2c_probe(struct i2c_client *client,
 			     const struct i2c_device_id *id)
 {
 	int rc;
-	if (tpm_dev.client != NULL)
+	struct device *dev = &(client->dev);
+
+	if (tpm_dev.client != NULL) {
+		dev_err(dev, "This driver only supports one client at a time\n");
 		return -EBUSY;	/* We only support one client */
+	}
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-		dev_err(&client->dev,
-			"no algorithms associated to the i2c bus\n");
+		dev_err(dev, "no algorithms associated to the i2c bus\n");
 		return -ENODEV;
 	}
 
@@ -682,7 +776,6 @@
 }
 
 static struct i2c_driver tpm_tis_i2c_driver = {
-
 	.id_table = tpm_tis_i2c_table,
 	.probe = tpm_tis_i2c_probe,
 	.remove = tpm_tis_i2c_remove,
@@ -690,11 +783,12 @@
 		   .name = "tpm_i2c_infineon",
 		   .owner = THIS_MODULE,
 		   .pm = &tpm_tis_i2c_ops,
+		   .of_match_table = of_match_ptr(tpm_tis_i2c_of_match),
 		   },
 };
 
 module_i2c_driver(tpm_tis_i2c_driver);
 MODULE_AUTHOR("Peter Huewe <peter.huewe@infineon.com>");
 MODULE_DESCRIPTION("TPM TIS I2C Infineon Driver");
-MODULE_VERSION("2.1.5");
+MODULE_VERSION("2.2.0");
 MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index 1f5f71e..5bb8e2d 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -36,7 +36,6 @@
 #include <linux/i2c.h>
 #include <linux/fs.h>
 #include <linux/miscdevice.h>
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/init.h>
@@ -50,7 +49,6 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/slab.h>
-#include <linux/sched.h>
 
 #include "tpm.h"
 #include "tpm_i2c_stm_st33.h"
@@ -178,7 +176,7 @@
 	struct i2c_client *client;
 	struct st33zp24_platform_data *pin_infos;
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 	pin_infos = client->dev.platform_data;
 
 	status = wait_for_completion_interruptible_timeout(
@@ -197,12 +195,12 @@
 	int status = 2;
 	struct i2c_client *client;
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 
 	status = _wait_for_interrupt_serirq_timeout(chip, timeout);
 	if (!status) {
 		status = -EBUSY;
-	} else{
+	} else {
 		clear_interruption(client);
 		if (condition)
 			status = 1;
@@ -219,7 +217,7 @@
 	struct i2c_client *client;
 	u8 data;
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 
 	data = TPM_STS_COMMAND_READY;
 	I2C_WRITE_DATA(client, TPM_STS, &data, 1);
@@ -236,7 +234,7 @@
 {
 	struct i2c_client *client;
 	u8 data;
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 
 	I2C_READ_DATA(client, TPM_STS, &data, 1);
 	return data;
@@ -254,7 +252,7 @@
 	u8 data;
 	u8 status;
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 
 	status = I2C_READ_DATA(client, TPM_ACCESS, &data, 1);
 	if (status && (data &
@@ -278,7 +276,7 @@
 	struct i2c_client *client;
 	u8 data;
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 
 	if (check_locality(chip) == chip->vendor.locality)
 		return chip->vendor.locality;
@@ -294,7 +292,7 @@
 						      chip->vendor.timeout_a);
 		if (rc > 0)
 			return chip->vendor.locality;
-	} else{
+	} else {
 		stop = jiffies + chip->vendor.timeout_a;
 		do {
 			if (check_locality(chip) >= 0)
@@ -316,7 +314,7 @@
 	struct i2c_client *client;
 	u8 data;
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 	data = TPM_ACCESS_ACTIVE_LOCALITY;
 
 	I2C_WRITE_DATA(client, TPM_ACCESS, &data, 1);
@@ -333,7 +331,7 @@
 	int burstcnt, status;
 	u8 tpm_reg, temp;
 
-	struct i2c_client *client = (struct i2c_client *) TPM_VPRIV(chip);
+	struct i2c_client *client = (struct i2c_client *)TPM_VPRIV(chip);
 
 	stop = jiffies + chip->vendor.timeout_d;
 	do {
@@ -379,7 +377,7 @@
 						       mask), timeout);
 		if (rc > 0)
 			return 0;
-	} else{
+	} else {
 		stop = jiffies + timeout;
 		do {
 			msleep(TPM_TIMEOUT);
@@ -403,7 +401,7 @@
 	int size = 0, burstcnt, len;
 	struct i2c_client *client;
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 
 	while (size < count &&
 	       wait_for_stat(chip,
@@ -433,7 +431,7 @@
 
 	disable_irq_nosync(irq);
 
-	client = (struct i2c_client *) TPM_VPRIV(chip);
+	client = (struct i2c_client *)TPM_VPRIV(chip);
 	pin_infos = client->dev.platform_data;
 
 	complete(&pin_infos->irq_detection);
@@ -453,8 +451,7 @@
 static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
 			    size_t len)
 {
-	u32 status,
-	    burstcnt = 0, i, size;
+	u32 status, burstcnt = 0, i, size;
 	int ret;
 	u8 data;
 	struct i2c_client *client;
@@ -483,7 +480,7 @@
 		}
 	}
 
-	for (i = 0 ; i < len - 1 ;) {
+	for (i = 0; i < len - 1;) {
 		burstcnt = get_burstcount(chip);
 		size = min_t(int, len - i - 1, burstcnt);
 		ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
@@ -547,7 +544,7 @@
 		goto out;
 	}
 
-	expected = be32_to_cpu(*(__be32 *) (buf + 2));
+	expected = be32_to_cpu(*(__be32 *)(buf + 2));
 	if (expected > count) {
 		size = -EIO;
 		goto out;
@@ -569,7 +566,7 @@
 
 static bool tpm_st33_i2c_req_canceled(struct tpm_chip *chip, u8 status)
 {
-       return (status == TPM_STS_COMMAND_READY);
+	return (status == TPM_STS_COMMAND_READY);
 }
 
 static const struct file_operations tpm_st33_i2c_fops = {
@@ -617,7 +614,7 @@
 	.miscdev = {.fops = &tpm_st33_i2c_fops,},
 };
 
-static int interrupts ;
+static int interrupts;
 module_param(interrupts, int, 0444);
 MODULE_PARM_DESC(interrupts, "Enable interrupts");
 
@@ -714,7 +711,7 @@
 				"TPM SERIRQ management", chip);
 		if (err < 0) {
 			dev_err(chip->dev , "TPM SERIRQ signals %d not available\n",
-			gpio_to_irq(platform_data->io_serirq));
+				gpio_to_irq(platform_data->io_serirq));
 			goto _irq_set;
 		}
 
@@ -754,7 +751,7 @@
 	dev_info(chip->dev, "TPM I2C Initialized\n");
 	return 0;
 _irq_set:
-	free_irq(gpio_to_irq(platform_data->io_serirq), (void *) chip);
+	free_irq(gpio_to_irq(platform_data->io_serirq), (void *)chip);
 _gpio_init2:
 	if (interrupts)
 		gpio_free(platform_data->io_serirq);
@@ -784,7 +781,7 @@
 {
 	struct tpm_chip *chip = (struct tpm_chip *)i2c_get_clientdata(client);
 	struct st33zp24_platform_data *pin_infos =
-		((struct i2c_client *) TPM_VPRIV(chip))->dev.platform_data;
+		((struct i2c_client *)TPM_VPRIV(chip))->dev.platform_data;
 
 	if (pin_infos != NULL) {
 		free_irq(pin_infos->io_serirq, chip);
@@ -823,9 +820,9 @@
 	struct st33zp24_platform_data *pin_infos = dev->platform_data;
 	int ret = 0;
 
-	if (power_mgt)
+	if (power_mgt) {
 		gpio_set_value(pin_infos->io_lpcpd, 0);
-	else{
+	} else {
 		if (chip->data_buffer == NULL)
 			chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
 		ret = tpm_pm_suspend(dev);
@@ -851,12 +848,12 @@
 					  (chip->vendor.status(chip) &
 					  TPM_STS_VALID) == TPM_STS_VALID,
 					  chip->vendor.timeout_b);
-	} else{
-	if (chip->data_buffer == NULL)
-		chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
-	ret = tpm_pm_resume(dev);
-	if (!ret)
-		tpm_do_selftest(chip);
+	} else {
+		if (chip->data_buffer == NULL)
+			chip->data_buffer = pin_infos->tpm_i2c_buffer[0];
+		ret = tpm_pm_resume(dev);
+		if (!ret)
+			tpm_do_selftest(chip);
 	}
 	return ret;
 }				/* tpm_st33_i2c_pm_resume() */
@@ -867,7 +864,8 @@
 	{}
 };
 MODULE_DEVICE_TABLE(i2c, tpm_st33_i2c_id);
-static SIMPLE_DEV_PM_OPS(tpm_st33_i2c_ops, tpm_st33_i2c_pm_suspend, tpm_st33_i2c_pm_resume);
+static SIMPLE_DEV_PM_OPS(tpm_st33_i2c_ops, tpm_st33_i2c_pm_suspend,
+	tpm_st33_i2c_pm_resume);
 static struct i2c_driver tpm_st33_i2c_driver = {
 	.driver = {
 		   .owner = THIS_MODULE,
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 720ebcf..2168d15 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -158,9 +158,9 @@
 					    ACPI_TYPE_STRING);
 	if (ACPI_FAILURE(status))
 		return -ENOMEM;
-	strncpy(version,
+	strlcpy(version,
 		((union acpi_object *)output.pointer)->string.pointer,
-		PPI_VERSION_LEN);
+		PPI_VERSION_LEN + 1);
 	kfree(output.pointer);
 	output.length = ACPI_ALLOCATE_BUFFER;
 	output.pointer = NULL;
@@ -237,9 +237,9 @@
 					    ACPI_TYPE_STRING);
 	if (ACPI_FAILURE(status))
 		return -ENOMEM;
-	strncpy(version,
+	strlcpy(version,
 		((union acpi_object *)output.pointer)->string.pointer,
-		PPI_VERSION_LEN);
+		PPI_VERSION_LEN + 1);
 	/*
 	 * PPI spec defines params[3].type as empty package, but some platforms
 	 * (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
@@ -351,7 +351,7 @@
 static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
 {
 	char *str = buf;
-	char version[PPI_VERSION_LEN];
+	char version[PPI_VERSION_LEN + 1];
 	acpi_handle handle;
 	acpi_status status;
 	struct acpi_object_list input;
@@ -381,9 +381,9 @@
 	if (ACPI_FAILURE(status))
 		return -ENOMEM;
 
-	strncpy(version,
+	strlcpy(version,
 		((union acpi_object *)output.pointer)->string.pointer,
-		PPI_VERSION_LEN);
+		PPI_VERSION_LEN + 1);
 	kfree(output.pointer);
 	output.length = ACPI_ALLOCATE_BUFFER;
 	output.pointer = NULL;
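The three hunks above all make the same fix: strncpy() does not NUL-terminate when the source fills the buffer, while strlcpy() always terminates but truncates unless the destination can hold the string plus '\0'. Hence both the buffer and the size argument grow to PPI_VERSION_LEN + 1. In isolation (src standing in for the ACPI string pointer):

	char version[PPI_VERSION_LEN + 1];

	strlcpy(version, src, sizeof(version));	/* keeps up to PPI_VERSION_LEN chars + '\0' */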
diff --git a/drivers/clk/x86/Makefile b/drivers/clk/x86/Makefile
index f9ba4fa..0478138 100644
--- a/drivers/clk/x86/Makefile
+++ b/drivers/clk/x86/Makefile
@@ -1,2 +1,2 @@
-clk-x86-lpss-objs		:= clk-lpss.o clk-lpt.o
+clk-x86-lpss-objs		:= clk-lpt.o
 obj-$(CONFIG_X86_INTEL_LPSS)	+= clk-x86-lpss.o
diff --git a/drivers/clk/x86/clk-lpss.c b/drivers/clk/x86/clk-lpss.c
deleted file mode 100644
index b5e229f..0000000
--- a/drivers/clk/x86/clk-lpss.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Intel Low Power Subsystem clocks.
- *
- * Copyright (C) 2013, Intel Corporation
- * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
- *	    Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/acpi.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-
-static int clk_lpss_is_mmio_resource(struct acpi_resource *res, void *data)
-{
-	struct resource r;
-	return !acpi_dev_resource_memory(res, &r);
-}
-
-static acpi_status clk_lpss_find_mmio(acpi_handle handle, u32 level,
-				      void *data, void **retval)
-{
-	struct resource_list_entry *rentry;
-	struct list_head resource_list;
-	struct acpi_device *adev;
-	const char *uid = data;
-	int ret;
-
-	if (acpi_bus_get_device(handle, &adev))
-		return AE_OK;
-
-	if (uid) {
-		if (!adev->pnp.unique_id)
-			return AE_OK;
-		if (strcmp(uid, adev->pnp.unique_id))
-			return AE_OK;
-	}
-
-	INIT_LIST_HEAD(&resource_list);
-	ret = acpi_dev_get_resources(adev, &resource_list,
-				     clk_lpss_is_mmio_resource, NULL);
-	if (ret < 0)
-		return AE_NO_MEMORY;
-
-	list_for_each_entry(rentry, &resource_list, node)
-		if (resource_type(&rentry->res) == IORESOURCE_MEM) {
-			*(struct resource *)retval = rentry->res;
-			break;
-		}
-
-	acpi_dev_free_resource_list(&resource_list);
-	return AE_OK;
-}
-
-/**
- * clk_register_lpss_gate - register LPSS clock gate
- * @name: name of this clock gate
- * @parent_name: parent clock name
- * @hid: ACPI _HID of the device
- * @uid: ACPI _UID of the device (optional)
- * @offset: LPSS PRV_CLOCK_PARAMS offset
- *
- * Creates and registers LPSS clock gate.
- */
-struct clk *clk_register_lpss_gate(const char *name, const char *parent_name,
-				   const char *hid, const char *uid,
-				   unsigned offset)
-{
-	struct resource res = { };
-	void __iomem *mmio_base;
-	acpi_status status;
-	struct clk *clk;
-
-	/*
-	 * First try to look the device and its mmio resource from the
-	 * ACPI namespace.
-	 */
-	status = acpi_get_devices(hid, clk_lpss_find_mmio, (void *)uid,
-				  (void **)&res);
-	if (ACPI_FAILURE(status) || !res.start)
-		return ERR_PTR(-ENODEV);
-
-	mmio_base = ioremap(res.start, resource_size(&res));
-	if (!mmio_base)
-		return ERR_PTR(-ENOMEM);
-
-	clk = clk_register_gate(NULL, name, parent_name, 0, mmio_base + offset,
-				0, 0, NULL);
-	if (IS_ERR(clk))
-		iounmap(mmio_base);
-
-	return clk;
-}
diff --git a/drivers/clk/x86/clk-lpss.h b/drivers/clk/x86/clk-lpss.h
deleted file mode 100644
index e9460f4..0000000
--- a/drivers/clk/x86/clk-lpss.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Intel Low Power Subsystem clock.
- *
- * Copyright (C) 2013, Intel Corporation
- * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
- *	    Heikki Krogerus <heikki.krogerus@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __CLK_LPSS_H
-#define __CLK_LPSS_H
-
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-
-#ifdef CONFIG_ACPI
-extern struct clk *clk_register_lpss_gate(const char *name,
-					  const char *parent_name,
-					  const char *hid, const char *uid,
-					  unsigned offset);
-#else
-static inline struct clk *clk_register_lpss_gate(const char *name,
-						 const char *parent_name,
-						 const char *hid,
-						 const char *uid,
-						 unsigned offset)
-{
-	return ERR_PTR(-ENODEV);
-}
-#endif
-
-#endif /* __CLK_LPSS_H */
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index 81298ae..5cf4f46 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -10,7 +10,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
@@ -18,8 +17,6 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 
-#include "clk-lpss.h"
-
 #define PRV_CLOCK_PARAMS 0x800
 
 static int lpt_clk_probe(struct platform_device *pdev)
@@ -34,40 +31,6 @@
 
 	/* Shared DMA clock */
 	clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
-
-	/* SPI clocks */
-	clk = clk_register_lpss_gate("spi0_clk", "lpss_clk", "INT33C0", NULL,
-				     PRV_CLOCK_PARAMS);
-	if (!IS_ERR(clk))
-		clk_register_clkdev(clk, NULL, "INT33C0:00");
-
-	clk = clk_register_lpss_gate("spi1_clk", "lpss_clk", "INT33C1", NULL,
-				     PRV_CLOCK_PARAMS);
-	if (!IS_ERR(clk))
-		clk_register_clkdev(clk, NULL, "INT33C1:00");
-
-	/* I2C clocks */
-	clk = clk_register_lpss_gate("i2c0_clk", "lpss_clk", "INT33C2", NULL,
-				     PRV_CLOCK_PARAMS);
-	if (!IS_ERR(clk))
-		clk_register_clkdev(clk, NULL, "INT33C2:00");
-
-	clk = clk_register_lpss_gate("i2c1_clk", "lpss_clk", "INT33C3", NULL,
-				     PRV_CLOCK_PARAMS);
-	if (!IS_ERR(clk))
-		clk_register_clkdev(clk, NULL, "INT33C3:00");
-
-	/* UART clocks */
-	clk = clk_register_lpss_gate("uart0_clk", "lpss_clk", "INT33C4", NULL,
-				     PRV_CLOCK_PARAMS);
-	if (!IS_ERR(clk))
-		clk_register_clkdev(clk, NULL, "INT33C4:00");
-
-	clk = clk_register_lpss_gate("uart1_clk", "lpss_clk", "INT33C5", NULL,
-				     PRV_CLOCK_PARAMS);
-	if (!IS_ERR(clk))
-		clk_register_clkdev(clk, NULL, "INT33C5:00");
-
 	return 0;
 }
 
@@ -79,8 +42,7 @@
 	.probe = lpt_clk_probe,
 };
 
-static int __init lpt_clk_init(void)
+int __init lpt_clk_init(void)
 {
 	return platform_driver_register(&lpt_clk_driver);
 }
-arch_initcall(lpt_clk_init);
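With the ACPI-enumerated gates removed, clk-lpt.c now only registers the shared DMA clock alias shown above ("hclk" on the "INTL9C60.0.auto" device). A consumer bound to that device would pick the clock up in the usual way (illustrative fragment; dma_dev is hypothetical):

	struct clk *hclk = clk_get(dma_dev, "hclk");	/* matches the clkdev entry above */

	if (!IS_ERR(hclk))
		clk_prepare_enable(hclk);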
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index cbcb21e..a1488f5 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -205,10 +205,99 @@
 source "drivers/cpufreq/Kconfig.arm"
 endmenu
 
+menu "AVR32 CPU frequency scaling drivers"
+depends on AVR32
+
+config AVR32_AT32AP_CPUFREQ
+	bool "CPU frequency driver for AT32AP"
+	depends on PLATFORM_AT32AP
+	default n
+	help
+	  This enables the CPU frequency driver for AT32AP processors.
+	  If in doubt, say N.
+
+endmenu
+
+menu "CPUFreq processor drivers"
+depends on IA64
+
+config IA64_ACPI_CPUFREQ
+	tristate "ACPI Processor P-States driver"
+	select CPU_FREQ_TABLE
+	depends on ACPI_PROCESSOR
+	help
+	  This driver adds a CPUFreq driver which utilizes the ACPI
+	  Processor Performance States.
+
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+
+	  If in doubt, say N.
+
+endmenu
+
+menu "MIPS CPUFreq processor drivers"
+depends on MIPS
+
+config LOONGSON2_CPUFREQ
+	tristate "Loongson2 CPUFreq Driver"
+	select CPU_FREQ_TABLE
+	help
+	  This option adds a CPUFreq driver for Loongson processors which
+	  support software-configurable CPU frequency.
+
+	  Loongson2F and its successors support this feature.
+
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+
+	  If in doubt, say N.
+
+endmenu
+
 menu "PowerPC CPU frequency scaling drivers"
 depends on PPC32 || PPC64
 source "drivers/cpufreq/Kconfig.powerpc"
 endmenu
 
+menu "SPARC CPU frequency scaling drivers"
+depends on SPARC64
+config SPARC_US3_CPUFREQ
+	tristate "UltraSPARC-III CPU Frequency driver"
+	select CPU_FREQ_TABLE
+	help
+	  This adds the CPUFreq driver for UltraSPARC-III processors.
+
+	  For details, take a look at <file:Documentation/cpu-freq>.
+
+	  If in doubt, say N.
+
+config SPARC_US2E_CPUFREQ
+	tristate "UltraSPARC-IIe CPU Frequency driver"
+	select CPU_FREQ_TABLE
+	help
+	  This adds the CPUFreq driver for UltraSPARC-IIe processors.
+
+	  For details, take a look at <file:Documentation/cpu-freq>.
+
+	  If in doubt, say N.
+endmenu
+
+menu "SH CPU Frequency scaling"
+depends on SUPERH
+config SH_CPU_FREQ
+	tristate "SuperH CPU Frequency driver"
+	select CPU_FREQ_TABLE
+	help
+	  This adds the cpufreq driver for SuperH. Any CPU that supports
+	  clock rate rounding through the clock framework can use this
+	  driver. While it will make the kernel slightly larger, this is
+	  harmless for CPUs that don't support rate rounding. The driver
+	  will also generate a notice in the boot log before disabling
+	  itself if the CPU in question is not capable of rate rounding.
+
+	  For details, take a look at <file:Documentation/cpu-freq>.
+
+	  If unsure, say N.
+endmenu
+
 endif
 endmenu
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 030ddf6..f3af18b 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -2,6 +2,93 @@
 # ARM CPU Frequency scaling drivers
 #
 
+config ARM_BIG_LITTLE_CPUFREQ
+	tristate
+	depends on ARM_CPU_TOPOLOGY
+
+config ARM_DT_BL_CPUFREQ
+	tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
+	select ARM_BIG_LITTLE_CPUFREQ
+	depends on OF && HAVE_CLK
+	help
+	  This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
+	  This gets frequency tables from DT.
+
+config ARM_EXYNOS_CPUFREQ
+	bool "SAMSUNG EXYNOS SoCs"
+	depends on ARCH_EXYNOS
+	default y
+	help
+	  This adds the CPUFreq driver common part for Samsung
+	  EXYNOS SoCs.
+
+	  If in doubt, say N.
+
+config ARM_EXYNOS4210_CPUFREQ
+	def_bool CPU_EXYNOS4210
+	help
+	  This adds the CPUFreq driver for Samsung EXYNOS4210
+	  SoC (S5PV310 or S5PC210).
+
+config ARM_EXYNOS4X12_CPUFREQ
+	def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+	help
+	  This adds the CPUFreq driver for Samsung EXYNOS4X12
+	  SoC (EXYNOS4212 or EXYNOS4412).
+
+config ARM_EXYNOS5250_CPUFREQ
+	def_bool SOC_EXYNOS5250
+	help
+	  This adds the CPUFreq driver for Samsung EXYNOS5250
+	  SoC.
+
+config ARM_EXYNOS5440_CPUFREQ
+	def_bool SOC_EXYNOS5440
+	depends on HAVE_CLK && PM_OPP && OF
+	help
+	  This adds the CPUFreq driver for Samsung EXYNOS5440
+	  SoC. The EXYNOS5440 clock controller differs from those in
+	  earlier Exynos SoCs, so this driver does not use the common
+	  Exynos cpufreq framework.
+
+config ARM_HIGHBANK_CPUFREQ
+	tristate "Calxeda Highbank-based"
+	depends on ARCH_HIGHBANK
+	select CPU_FREQ_TABLE
+	select GENERIC_CPUFREQ_CPU0
+	select PM_OPP
+	select REGULATOR
+
+	default m
+	help
+	  This adds the CPUFreq driver for Calxeda Highbank SoC
+	  based boards.
+
+	  If in doubt, say N.
+
+config ARM_IMX6Q_CPUFREQ
+	tristate "Freescale i.MX6Q cpufreq support"
+	depends on SOC_IMX6Q
+	depends on REGULATOR_ANATOP
+	help
+	  This adds cpufreq driver support for Freescale i.MX6Q SOC.
+
+	  If in doubt, say N.
+
+config ARM_INTEGRATOR
+	tristate "CPUfreq driver for ARM Integrator CPUs"
+	depends on ARCH_INTEGRATOR
+	default y
+	help
+	  This enables the CPUfreq driver for ARM Integrator CPUs.
+	  If in doubt, say Y.
+
+config ARM_KIRKWOOD_CPUFREQ
+	def_bool ARCH_KIRKWOOD && OF
+	help
+	  This adds the CPUFreq driver for Marvell Kirkwood
+	  SoCs.
+
 config ARM_OMAP2PLUS_CPUFREQ
 	bool "TI OMAP2+"
 	depends on ARCH_OMAP2PLUS
@@ -42,6 +129,7 @@
 config ARM_S5PV210_CPUFREQ
 	bool "Samsung S5PV210 and S5PC110"
 	depends on CPU_S5PV210
+	select CPU_FREQ_TABLE
 	default y
 	help
 	  This adds the CPUFreq driver for Samsung S5PV210 and
@@ -49,48 +137,11 @@
 
 	  If in doubt, say N.
 
-config ARM_EXYNOS_CPUFREQ
-	bool "SAMSUNG EXYNOS SoCs"
-	depends on ARCH_EXYNOS
-	default y
-	help
-	  This adds the CPUFreq driver common part for Samsung
-	  EXYNOS SoCs.
+config ARM_SA1100_CPUFREQ
+	bool
 
-	  If in doubt, say N.
-
-config ARM_EXYNOS4210_CPUFREQ
-	def_bool CPU_EXYNOS4210
-	help
-	  This adds the CPUFreq driver for Samsung EXYNOS4210
-	  SoC (S5PV310 or S5PC210).
-
-config ARM_EXYNOS4X12_CPUFREQ
-	def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
-	help
-	  This adds the CPUFreq driver for Samsung EXYNOS4X12
-	  SoC (EXYNOS4212 or EXYNOS4412).
-
-config ARM_EXYNOS5250_CPUFREQ
-	def_bool SOC_EXYNOS5250
-	help
-	  This adds the CPUFreq driver for Samsung EXYNOS5250
-	  SoC.
-
-config ARM_KIRKWOOD_CPUFREQ
-	def_bool ARCH_KIRKWOOD && OF
-	help
-	  This adds the CPUFreq driver for Marvell Kirkwood
-	  SoCs.
-
-config ARM_IMX6Q_CPUFREQ
-	tristate "Freescale i.MX6Q cpufreq support"
-	depends on SOC_IMX6Q
-	depends on REGULATOR_ANATOP
-	help
-	  This adds cpufreq driver support for Freescale i.MX6Q SOC.
-
-	  If in doubt, say N.
+config ARM_SA1110_CPUFREQ
+	bool
 
 config ARM_SPEAR_CPUFREQ
 	bool "SPEAr CPUFreq support"
@@ -98,18 +149,3 @@
 	default y
 	help
 	  This adds the CPUFreq driver support for SPEAr SOCs.
-
-config ARM_HIGHBANK_CPUFREQ
-	tristate "Calxeda Highbank-based"
-	depends on ARCH_HIGHBANK
-	select CPU_FREQ_TABLE
-	select GENERIC_CPUFREQ_CPU0
-	select PM_OPP
-	select REGULATOR
-
-	default m
-	help
-	  This adds the CPUFreq driver for Calxeda Highbank SoC
-	  based boards.
-
-	  If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index e76992f..9c926ca 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -1,3 +1,21 @@
+config CPU_FREQ_CBE
+	tristate "CBE frequency scaling"
+	depends on CBE_RAS && PPC_CELL
+	default m
+	help
+	  This adds the cpufreq driver for Cell BE processors.
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+	  If you don't have such a processor, say N.
+
+config CPU_FREQ_CBE_PMI
+	bool "CBE frequency scaling using PMI interface"
+	depends on CPU_FREQ_CBE
+	default n
+	help
+	  Select this if you want to use the PMI interface to switch
+	  frequencies. Using PMI, the processor will not only be able to run
+	  at a lower speed, but also at a lower core voltage.
+
 config CPU_FREQ_MAPLE
 	bool "Support for Maple 970FX Evaluation Board"
 	depends on PPC_MAPLE
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index d7dc0ed..2b8a8c3 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -129,6 +129,23 @@
 
 	  For details, take a look at <file:Documentation/cpu-freq/>.
 
+config X86_AMD_FREQ_SENSITIVITY
+	tristate "AMD frequency sensitivity feedback powersave bias"
+	depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
+	help
+	  This adds AMD-specific powersave bias function to the ondemand
+	  governor, which allows it to make more power-conscious frequency
+	  change decisions based on feedback from hardware (available on AMD
+	  Family 16h and above).
+
+	  Hardware feedback tells software how "sensitive" to frequency changes
+	  the CPUs' workloads are. CPU-bound workloads will be more sensitive
+	  -- they will perform better as frequency increases. Memory/IO-bound
+	  workloads will be less sensitive -- they will not necessarily perform
+	  better as frequency increases.
+
+	  If in doubt, say N.
+
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
 	depends on X86_32 && PCI
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 863fd18..315b923 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -41,23 +41,54 @@
 obj-$(CONFIG_X86_P4_CLOCKMOD)		+= p4-clockmod.o
 obj-$(CONFIG_X86_CPUFREQ_NFORCE2)	+= cpufreq-nforce2.o
 obj-$(CONFIG_X86_INTEL_PSTATE)		+= intel_pstate.o
+obj-$(CONFIG_X86_AMD_FREQ_SENSITIVITY)	+= amd_freq_sensitivity.o
 
 ##################################################################################
 # ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)	+= arm_big_little.o
+# big LITTLE per platform glues. Keep DT_BL_CPUFREQ as the last entry in all big
+# LITTLE drivers, so that it is probed last.
+obj-$(CONFIG_ARM_DT_BL_CPUFREQ)		+= arm_big_little_dt.o
+
+obj-$(CONFIG_ARCH_DAVINCI_DA850)	+= davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)		+= dbx500-cpufreq.o
-obj-$(CONFIG_ARM_S3C2416_CPUFREQ)	+= s3c2416-cpufreq.o
-obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)	+= s3c64xx-cpufreq.o
-obj-$(CONFIG_ARM_S5PV210_CPUFREQ)	+= s5pv210-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)	+= exynos4x12-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)	+= exynos5250-cpufreq.o
-obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
-obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
-obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)	+= exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)	+= highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
+obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
+obj-$(CONFIG_PXA25x)			+= pxa2xx-cpufreq.o
+obj-$(CONFIG_PXA27x)			+= pxa2xx-cpufreq.o
+obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_S3C2416_CPUFREQ)	+= s3c2416-cpufreq.o
+obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)	+= s3c64xx-cpufreq.o
+obj-$(CONFIG_ARM_S5PV210_CPUFREQ)	+= s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_SA1100_CPUFREQ)	+= sa1100-cpufreq.o
+obj-$(CONFIG_ARM_SA1110_CPUFREQ)	+= sa1110-cpufreq.o
+obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
+obj-$(CONFIG_ARCH_TEGRA)		+= tegra-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
+obj-$(CONFIG_CPU_FREQ_CBE)		+= ppc-cbe-cpufreq.o
+ppc-cbe-cpufreq-y			+= ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o
+obj-$(CONFIG_CPU_FREQ_CBE_PMI)		+= ppc_cbe_cpufreq_pmi.o
 obj-$(CONFIG_CPU_FREQ_MAPLE)		+= maple-cpufreq.o
+
+##################################################################################
+# Other platform drivers
+obj-$(CONFIG_AVR32_AT32AP_CPUFREQ)	+= at32ap-cpufreq.o
+obj-$(CONFIG_BLACKFIN)			+= blackfin-cpufreq.o
+obj-$(CONFIG_CRIS_MACH_ARTPEC3)		+= cris-artpec3-cpufreq.o
+obj-$(CONFIG_ETRAXFS)			+= cris-etraxfs-cpufreq.o
+obj-$(CONFIG_IA64_ACPI_CPUFREQ)		+= ia64-acpi-cpufreq.o
+obj-$(CONFIG_LOONGSON2_CPUFREQ)		+= loongson2_cpufreq.o
+obj-$(CONFIG_SH_CPU_FREQ)		+= sh-cpufreq.o
+obj-$(CONFIG_SPARC_US2E_CPUFREQ)	+= sparc-us2e-cpufreq.o
+obj-$(CONFIG_SPARC_US3_CPUFREQ)		+= sparc-us3-cpufreq.o
+obj-$(CONFIG_UNICORE32)			+= unicore2-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 57a8774..11b8b4b 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -423,7 +423,6 @@
 	struct drv_cmd cmd;
 	unsigned int next_state = 0; /* Index into freq_table */
 	unsigned int next_perf_state = 0; /* Index into perf table */
-	unsigned int i;
 	int result = 0;
 
 	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
@@ -486,10 +485,7 @@
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	drv_write(&cmd);
 
@@ -502,10 +498,7 @@
 		}
 	}
 
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	perf->state = next_perf_state;
 
 out:
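This is the first of many conversions in this series to the new notifier convention: cpufreq_notify_transition() now takes the policy, so drivers no longer iterate policy->cpus and fill in freqs.cpu themselves. The resulting call pattern, with hypothetical frequency values, is simply:

	freqs.old = old_khz;			/* current frequency in kHz */
	freqs.new = new_khz;			/* target frequency in kHz  */

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	/* ... reprogram the hardware ... */
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);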
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
new file mode 100644
index 0000000..f6b79ab
--- /dev/null
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -0,0 +1,148 @@
+/*
+ * amd_freq_sensitivity.c: AMD frequency sensitivity feedback powersave bias
+ *                         for the ondemand governor.
+ *
+ * Copyright (C) 2013 Advanced Micro Devices, Inc.
+ *
+ * Author: Jacob Shin <jacob.shin@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/percpu-defs.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+
+#include <asm/msr.h>
+#include <asm/cpufeature.h>
+
+#include "cpufreq_governor.h"
+
+#define MSR_AMD64_FREQ_SENSITIVITY_ACTUAL	0xc0010080
+#define MSR_AMD64_FREQ_SENSITIVITY_REFERENCE	0xc0010081
+#define CLASS_CODE_SHIFT			56
+#define POWERSAVE_BIAS_MAX			1000
+#define POWERSAVE_BIAS_DEF			400
+
+struct cpu_data_t {
+	u64 actual;
+	u64 reference;
+	unsigned int freq_prev;
+};
+
+static DEFINE_PER_CPU(struct cpu_data_t, cpu_data);
+
+static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
+					      unsigned int freq_next,
+					      unsigned int relation)
+{
+	int sensitivity;
+	long d_actual, d_reference;
+	struct msr actual, reference;
+	struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
+	struct dbs_data *od_data = policy->governor_data;
+	struct od_dbs_tuners *od_tuners = od_data->tuners;
+	struct od_cpu_dbs_info_s *od_info =
+		od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
+
+	if (!od_info->freq_table)
+		return freq_next;
+
+	rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
+		&actual.l, &actual.h);
+	rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
+		&reference.l, &reference.h);
+	actual.h &= 0x00ffffff;
+	reference.h &= 0x00ffffff;
+
+	/* counter wrapped around, so stay on current frequency */
+	if (actual.q < data->actual || reference.q < data->reference) {
+		freq_next = policy->cur;
+		goto out;
+	}
+
+	d_actual = actual.q - data->actual;
+	d_reference = reference.q - data->reference;
+
+	/* divide by 0, so stay on current frequency as well */
+	if (d_reference == 0) {
+		freq_next = policy->cur;
+		goto out;
+	}
+
+	sensitivity = POWERSAVE_BIAS_MAX -
+		(POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
+
+	sensitivity = clamp(sensitivity, 0, POWERSAVE_BIAS_MAX);
+
+	/* this workload is not CPU bound, so choose a lower freq */
+	if (sensitivity < od_tuners->powersave_bias) {
+		if (data->freq_prev == policy->cur)
+			freq_next = policy->cur;
+
+		if (freq_next > policy->cur)
+			freq_next = policy->cur;
+		else if (freq_next < policy->cur)
+			freq_next = policy->min;
+		else {
+			unsigned int index;
+
+			cpufreq_frequency_table_target(policy,
+				od_info->freq_table, policy->cur - 1,
+				CPUFREQ_RELATION_H, &index);
+			freq_next = od_info->freq_table[index].frequency;
+		}
+
+		data->freq_prev = freq_next;
+	} else
+		data->freq_prev = 0;
+
+out:
+	data->actual = actual.q;
+	data->reference = reference.q;
+	return freq_next;
+}
+
+static int __init amd_freq_sensitivity_init(void)
+{
+	u64 val;
+
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return -ENODEV;
+
+	if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+		return -ENODEV;
+
+	if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
+		return -ENODEV;
+
+	if (!(val >> CLASS_CODE_SHIFT))
+		return -ENODEV;
+
+	od_register_powersave_bias_handler(amd_powersave_bias_target,
+			POWERSAVE_BIAS_DEF);
+	return 0;
+}
+late_initcall(amd_freq_sensitivity_init);
+
+static void __exit amd_freq_sensitivity_exit(void)
+{
+	od_unregister_powersave_bias_handler();
+}
+module_exit(amd_freq_sensitivity_exit);
+
+static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);
+
+MODULE_AUTHOR("Jacob Shin <jacob.shin@amd.com>");
+MODULE_DESCRIPTION("AMD frequency sensitivity feedback powersave bias for "
+		"the ondemand governor.");
+MODULE_LICENSE("GPL");
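Algebraically the expression above reduces to POWERSAVE_BIAS_MAX * d_actual / d_reference (ignoring integer truncation), so it scales with the ratio of the two counter deltas. A worked example with hypothetical values:

	long d_actual = 300, d_reference = 1000;
	int sensitivity = POWERSAVE_BIAS_MAX -
		(POWERSAVE_BIAS_MAX * (d_reference - d_actual) / d_reference);
	/*
	 * sensitivity == 300, which is below POWERSAVE_BIAS_DEF (400), so
	 * this sample is treated as memory/IO bound and freq_next is biased
	 * toward a lower frequency.
	 */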
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
new file mode 100644
index 0000000..dbdf677
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.c
@@ -0,0 +1,278 @@
+/*
+ * ARM big.LITTLE Platforms CPUFreq support
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/of_platform.h>
+#include <linux/opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+
+#include "arm_big_little.h"
+
+/* Currently we support only two clusters */
+#define MAX_CLUSTERS	2
+
+static struct cpufreq_arm_bL_ops *arm_bL_ops;
+static struct clk *clk[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
+static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
+
+static int cpu_to_cluster(int cpu)
+{
+	return topology_physical_package_id(cpu);
+}
+
+static unsigned int bL_cpufreq_get(unsigned int cpu)
+{
+	u32 cur_cluster = cpu_to_cluster(cpu);
+
+	return clk_get_rate(clk[cur_cluster]) / 1000;
+}
+
+/* Validate policy frequency range */
+static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+	u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+	return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+/* Set clock frequency */
+static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
+		unsigned int target_freq, unsigned int relation)
+{
+	struct cpufreq_freqs freqs;
+	u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
+	int ret = 0;
+
+	cur_cluster = cpu_to_cluster(policy->cpu);
+
+	freqs.old = bL_cpufreq_get(policy->cpu);
+
+	/* Determine valid target frequency using freq_table */
+	cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
+			target_freq, relation, &freq_tab_idx);
+	freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+	pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
+			__func__, cpu, cur_cluster, freqs.old, target_freq,
+			freqs.new);
+
+	if (freqs.old == freqs.new)
+		return 0;
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+	ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
+	if (ret) {
+		pr_err("clk_set_rate failed: %d\n", ret);
+		return ret;
+	}
+
+	policy->cur = freqs.new;
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+	return ret;
+}
+
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+	u32 cluster = cpu_to_cluster(cpu_dev->id);
+
+	if (!atomic_dec_return(&cluster_usage[cluster])) {
+		clk_put(clk[cluster]);
+		opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+		dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
+	}
+}
+
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+{
+	u32 cluster = cpu_to_cluster(cpu_dev->id);
+	char name[14] = "cpu-cluster.";
+	int ret;
+
+	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
+		return 0;
+
+	ret = arm_bL_ops->init_opp_table(cpu_dev);
+	if (ret) {
+		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
+				__func__, cpu_dev->id, ret);
+		goto atomic_dec;
+	}
+
+	ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+	if (ret) {
+		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
+				__func__, cpu_dev->id, ret);
+		goto atomic_dec;
+	}
+
+	name[12] = cluster + '0';
+	clk[cluster] = clk_get_sys(name, NULL);
+	if (!IS_ERR(clk[cluster])) {
+		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
+				__func__, clk[cluster], freq_table[cluster],
+				cluster);
+		return 0;
+	}
+
+	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
+			__func__, cpu_dev->id, cluster);
+	ret = PTR_ERR(clk[cluster]);
+	opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+
+atomic_dec:
+	atomic_dec(&cluster_usage[cluster]);
+	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
+			cluster);
+	return ret;
+}
+
+/* Per-CPU initialization */
+static int bL_cpufreq_init(struct cpufreq_policy *policy)
+{
+	u32 cur_cluster = cpu_to_cluster(policy->cpu);
+	struct device *cpu_dev;
+	int ret;
+
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+				policy->cpu);
+		return -ENODEV;
+	}
+
+	ret = get_cluster_clk_and_freq_table(cpu_dev);
+	if (ret)
+		return ret;
+
+	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+	if (ret) {
+		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
+				policy->cpu, cur_cluster);
+		put_cluster_clk_and_freq_table(cpu_dev);
+		return ret;
+	}
+
+	cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+
+	if (arm_bL_ops->get_transition_latency)
+		policy->cpuinfo.transition_latency =
+			arm_bL_ops->get_transition_latency(cpu_dev);
+	else
+		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+
+	policy->cur = bL_cpufreq_get(policy->cpu);
+
+	cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+
+	dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+	return 0;
+}
+
+static int bL_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	struct device *cpu_dev;
+
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__,
+				policy->cpu);
+		return -ENODEV;
+	}
+
+	put_cluster_clk_and_freq_table(cpu_dev);
+	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
+
+	return 0;
+}
+
+/* Export freq_table to sysfs */
+static struct freq_attr *bL_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver bL_cpufreq_driver = {
+	.name			= "arm-big-little",
+	.flags			= CPUFREQ_STICKY,
+	.verify			= bL_cpufreq_verify_policy,
+	.target			= bL_cpufreq_set_target,
+	.get			= bL_cpufreq_get,
+	.init			= bL_cpufreq_init,
+	.exit			= bL_cpufreq_exit,
+	.have_governor_per_policy = true,
+	.attr			= bL_cpufreq_attr,
+};
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
+{
+	int ret;
+
+	if (arm_bL_ops) {
+		pr_debug("%s: Already registered: %s, exiting\n", __func__,
+				arm_bL_ops->name);
+		return -EBUSY;
+	}
+
+	if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
+		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
+		return -ENODEV;
+	}
+
+	arm_bL_ops = ops;
+
+	ret = cpufreq_register_driver(&bL_cpufreq_driver);
+	if (ret) {
+		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
+				__func__, ops->name, ret);
+		arm_bL_ops = NULL;
+	} else {
+		pr_info("%s: Registered platform driver: %s\n", __func__,
+				ops->name);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_register);
+
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
+{
+	if (arm_bL_ops != ops) {
+		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
+				__func__, arm_bL_ops->name);
+		return;
+	}
+
+	cpufreq_unregister_driver(&bL_cpufreq_driver);
+	pr_info("%s: Un-registered platform driver: %s\n", __func__,
+			arm_bL_ops->name);
+	arm_bL_ops = NULL;
+}
+EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
new file mode 100644
index 0000000..70f18fc
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little.h
@@ -0,0 +1,40 @@
+/*
+ * ARM big.LITTLE platform's CPUFreq header file
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef CPUFREQ_ARM_BIG_LITTLE_H
+#define CPUFREQ_ARM_BIG_LITTLE_H
+
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct cpufreq_arm_bL_ops {
+	char name[CPUFREQ_NAME_LEN];
+	int (*get_transition_latency)(struct device *cpu_dev);
+
+	/*
+	 * This must set opp table for cpu_dev in a similar way as done by
+	 * of_init_opp_table().
+	 */
+	int (*init_opp_table)(struct device *cpu_dev);
+};
+
+int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
+void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
+
+#endif /* CPUFREQ_ARM_BIG_LITTLE_H */
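The header fixes the contract between the common arm_big_little driver and its platform glues: supply a name plus an init_opp_table() callback, optionally a transition-latency hook, and call bL_cpufreq_register(). The DT glue that follows in this series is the generic implementation; a hypothetical board-specific glue would be about this small (all names and OPP values below are made up):

#include <linux/module.h>
#include <linux/opp.h>

#include "arm_big_little.h"

static int my_board_init_opp_table(struct device *cpu_dev)
{
	/* populate the OPPs for this CPU, e.g. from firmware or board data */
	return opp_add(cpu_dev, 350000000, 900000);	/* 350 MHz @ 0.9 V */
}

static struct cpufreq_arm_bL_ops my_board_bL_ops = {
	.name		= "my-board-bl",
	.init_opp_table	= my_board_init_opp_table,
};

static int __init my_board_bL_init(void)
{
	return bL_cpufreq_register(&my_board_bL_ops);
}
module_init(my_board_bL_init);

static void __exit my_board_bL_exit(void)
{
	bL_cpufreq_unregister(&my_board_bL_ops);
}
module_exit(my_board_bL_exit);

MODULE_LICENSE("GPL");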
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
new file mode 100644
index 0000000..44be311
--- /dev/null
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -0,0 +1,107 @@
+/*
+ * Generic big.LITTLE CPUFreq Interface driver
+ *
+ * It provides necessary ops to arm_big_little cpufreq driver and gets
+ * Frequency information from Device Tree. Freq table in DT must be in KHz.
+ *
+ * Copyright (C) 2013 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include "arm_big_little.h"
+
+static int dt_init_opp_table(struct device *cpu_dev)
+{
+	struct device_node *np, *parent;
+	int count = 0, ret;
+
+	parent = of_find_node_by_path("/cpus");
+	if (!parent) {
+		pr_err("failed to find OF /cpus\n");
+		return -ENOENT;
+	}
+
+	for_each_child_of_node(parent, np) {
+		if (count++ != cpu_dev->id)
+			continue;
+		if (!of_get_property(np, "operating-points", NULL)) {
+			ret = -ENODATA;
+		} else {
+			cpu_dev->of_node = np;
+			ret = of_init_opp_table(cpu_dev);
+		}
+		of_node_put(np);
+		of_node_put(parent);
+
+		return ret;
+	}
+
+	return -ENODEV;
+}
+
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+	struct device_node *np, *parent;
+	u32 transition_latency = CPUFREQ_ETERNAL;
+	int count = 0;
+
+	parent = of_find_node_by_path("/cpus");
+	if (!parent) {
+		pr_err("failed to find OF /cpus\n");
+		return -ENOENT;
+	}
+
+	for_each_child_of_node(parent, np) {
+		if (count++ != cpu_dev->id)
+			continue;
+
+		of_property_read_u32(np, "clock-latency", &transition_latency);
+		of_node_put(np);
+		of_node_put(parent);
+
+		return transition_latency;
+	}
+
+	return -ENODEV;
+}
+
+static struct cpufreq_arm_bL_ops dt_bL_ops = {
+	.name	= "dt-bl",
+	.get_transition_latency = dt_get_transition_latency,
+	.init_opp_table = dt_init_opp_table,
+};
+
+static int generic_bL_init(void)
+{
+	return bL_cpufreq_register(&dt_bL_ops);
+}
+module_init(generic_bL_init);
+
+static void generic_bL_exit(void)
+{
+	return bL_cpufreq_unregister(&dt_bL_ops);
+}
+module_exit(generic_bL_exit);
+
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
+MODULE_LICENSE("GPL");
diff --git a/arch/avr32/mach-at32ap/cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
similarity index 95%
rename from arch/avr32/mach-at32ap/cpufreq.c
rename to drivers/cpufreq/at32ap-cpufreq.c
index 18b7656..6544887 100644
--- a/arch/avr32/mach-at32ap/cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -61,7 +61,6 @@
 
 	freqs.old = at32_get_speed(0);
 	freqs.new = (freq + 500) / 1000;
-	freqs.cpu = 0;
 	freqs.flags = 0;
 
 	if (!ref_freq) {
@@ -69,7 +68,7 @@
 		loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	if (freqs.old < freqs.new)
 		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
 				loops_per_jiffy_ref, ref_freq, freqs.new);
@@ -77,7 +76,7 @@
 	if (freqs.new < freqs.old)
 		boot_cpu_data.loops_per_jiffy = cpufreq_scale(
 				loops_per_jiffy_ref, ref_freq, freqs.new);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	pr_debug("cpufreq: set frequency %lu Hz\n", freq);
 
diff --git a/arch/blackfin/mach-common/cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
similarity index 75%
rename from arch/blackfin/mach-common/cpufreq.c
rename to drivers/cpufreq/blackfin-cpufreq.c
index d88bd31..995511e80 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -127,13 +127,13 @@
 }
 #endif
 
-static int bfin_target(struct cpufreq_policy *poli,
+static int bfin_target(struct cpufreq_policy *policy,
 			unsigned int target_freq, unsigned int relation)
 {
 #ifndef CONFIG_BF60x
 	unsigned int plldiv;
 #endif
-	unsigned int index, cpu;
+	unsigned int index;
 	unsigned long cclk_hz;
 	struct cpufreq_freqs freqs;
 	static unsigned long lpj_ref;
@@ -144,59 +144,48 @@
 	cycles_t cycles;
 #endif
 
-	for_each_online_cpu(cpu) {
-		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	if (cpufreq_frequency_table_target(policy, bfin_freq_table, target_freq,
+				relation, &index))
+		return -EINVAL;
 
-		if (!policy)
-			continue;
+	cclk_hz = bfin_freq_table[index].frequency;
 
-		if (cpufreq_frequency_table_target(policy, bfin_freq_table,
-				 target_freq, relation, &index))
-			return -EINVAL;
+	freqs.old = bfin_getfreq_khz(0);
+	freqs.new = cclk_hz;
 
-		cclk_hz = bfin_freq_table[index].frequency;
+	pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
+			cclk_hz, target_freq, freqs.old);
 
-		freqs.old = bfin_getfreq_khz(0);
-		freqs.new = cclk_hz;
-		freqs.cpu = cpu;
-
-		pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
-			 cclk_hz, target_freq, freqs.old);
-
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-		if (cpu == CPUFREQ_CPU) {
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 #ifndef CONFIG_BF60x
-			plldiv = (bfin_read_PLL_DIV() & SSEL) |
-						dpm_state_table[index].csel;
-			bfin_write_PLL_DIV(plldiv);
+	plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
+	bfin_write_PLL_DIV(plldiv);
 #else
-			ret = cpu_set_cclk(cpu, freqs.new * 1000);
-			if (ret != 0) {
-				WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
-				break;
-			}
-#endif
-			on_each_cpu(bfin_adjust_core_timer, &index, 1);
-#if defined(CONFIG_CYCLES_CLOCKSOURCE)
-			cycles = get_cycles();
-			SSYNC();
-			cycles += 10; /* ~10 cycles we lose after get_cycles() */
-			__bfin_cycles_off +=
-			    (cycles << __bfin_cycles_mod) - (cycles << index);
-			__bfin_cycles_mod = index;
-#endif
-			if (!lpj_ref_freq) {
-				lpj_ref = loops_per_jiffy;
-				lpj_ref_freq = freqs.old;
-			}
-			if (freqs.new != freqs.old) {
-				loops_per_jiffy = cpufreq_scale(lpj_ref,
-						lpj_ref_freq, freqs.new);
-			}
-		}
-		/* TODO: just test case for cycles clock source, remove later */
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	ret = cpu_set_cclk(policy->cpu, freqs.new * 1000);
+	if (ret != 0) {
+		WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
+		return ret;
 	}
+#endif
+	on_each_cpu(bfin_adjust_core_timer, &index, 1);
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+	cycles = get_cycles();
+	SSYNC();
+	cycles += 10; /* ~10 cycles we lose after get_cycles() */
+	__bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
+	__bfin_cycles_mod = index;
+#endif
+	if (!lpj_ref_freq) {
+		lpj_ref = loops_per_jiffy;
+		lpj_ref_freq = freqs.old;
+	}
+	if (freqs.new != freqs.old) {
+		loops_per_jiffy = cpufreq_scale(lpj_ref,
+				lpj_ref_freq, freqs.new);
+	}
+
+	/* TODO: just test case for cycles clock source, remove later */
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	pr_debug("cpufreq: done\n");
 	return ret;
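The rewritten bfin_target() keeps the usual loops_per_jiffy rescaling after a successful frequency switch; stripped of driver detail the pattern looks roughly like the sketch below, where lpj_ref/lpj_ref_freq stand in for the statics kept by the driver.

/*
 * Rough sketch of the loops_per_jiffy bookkeeping shared by these
 * drivers: remember the first (reference) frequency, then rescale the
 * delay-loop calibration whenever the frequency changes.
 */
static unsigned long lpj_ref, lpj_ref_freq;

static void rescale_loops_per_jiffy(unsigned int old_khz, unsigned int new_khz)
{
	if (!lpj_ref_freq) {
		lpj_ref = loops_per_jiffy;
		lpj_ref_freq = old_khz;
	}

	if (new_khz != old_khz)
		loops_per_jiffy = cpufreq_scale(lpj_ref, lpj_ref_freq, new_khz);
}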
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 37d23a0..3ab8294 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -44,8 +44,9 @@
 {
 	struct cpufreq_freqs freqs;
 	struct opp *opp;
-	unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
-	unsigned int index, cpu;
+	unsigned long volt = 0, volt_old = 0, tol = 0;
+	long freq_Hz;
+	unsigned int index;
 	int ret;
 
 	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -65,10 +66,7 @@
 	if (freqs.old == freqs.new)
 		return 0;
 
-	for_each_online_cpu(cpu) {
-		freqs.cpu = cpu;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	if (cpu_reg) {
 		rcu_read_lock();
@@ -76,7 +74,9 @@
 		if (IS_ERR(opp)) {
 			rcu_read_unlock();
 			pr_err("failed to find OPP for %ld\n", freq_Hz);
-			return PTR_ERR(opp);
+			freqs.new = freqs.old;
+			ret = PTR_ERR(opp);
+			goto post_notify;
 		}
 		volt = opp_get_voltage(opp);
 		rcu_read_unlock();
@@ -94,7 +94,7 @@
 		if (ret) {
 			pr_err("failed to scale voltage up: %d\n", ret);
 			freqs.new = freqs.old;
-			return ret;
+			goto post_notify;
 		}
 	}
 
@@ -103,7 +103,8 @@
 		pr_err("failed to set clock rate: %d\n", ret);
 		if (cpu_reg)
 			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
-		return ret;
+		freqs.new = freqs.old;
+		goto post_notify;
 	}
 
 	/* scaling down?  scale voltage after frequency */
@@ -113,25 +114,19 @@
 			pr_err("failed to scale voltage down: %d\n", ret);
 			clk_set_rate(cpu_clk, freqs.old * 1000);
 			freqs.new = freqs.old;
-			return ret;
 		}
 	}
 
-	for_each_online_cpu(cpu) {
-		freqs.cpu = cpu;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+post_notify:
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
-	return 0;
+	return ret;
 }
 
 static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret;
 
-	if (policy->cpu != 0)
-		return -EINVAL;
-
 	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
 	if (ret) {
 		pr_err("invalid frequency table: %d\n", ret);
@@ -262,6 +257,7 @@
 	}
 
 	of_node_put(np);
+	of_node_put(parent);
 	return 0;
 
 out_free_table:
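The cpufreq-cpu0 change above establishes the error-handling convention that goes with the new notifier API: once CPUFREQ_PRECHANGE has been sent, every failure path rewinds freqs.new and still sends CPUFREQ_POSTCHANGE, so listeners never see an unbalanced transition. A condensed sketch of that shape follows; my_set_rate() is a hypothetical hardware hook, not an existing function.

/*
 * Condensed sketch of the "always post-notify" pattern: report the
 * frequency actually reached, which is the old one if the switch failed.
 */
static int example_target(struct cpufreq_policy *policy, unsigned int new_khz)
{
	struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_khz };
	int ret;

	if (freqs.old == freqs.new)
		return 0;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	ret = my_set_rate(new_khz);		/* hypothetical hardware hook */
	if (ret)
		freqs.new = freqs.old;		/* nothing actually changed */

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
	return ret;
}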
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index 13d311e..af1542d 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -263,7 +263,6 @@
 
 	freqs.old = nforce2_get(policy->cpu);
 	freqs.new = target_fsb * fid * 100;
-	freqs.cpu = 0;		/* Only one CPU on nForce2 platforms */
 
 	if (freqs.old == freqs.new)
 		return 0;
@@ -271,7 +270,7 @@
 	pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
 	       freqs.old, freqs.new);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Disable IRQs */
 	/* local_irq_save(flags); */
@@ -286,7 +285,7 @@
 	/* Enable IRQs */
 	/* local_irq_restore(flags); */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
@@ -360,12 +359,10 @@
 		min_fsb = NFORCE2_MIN_FSB;
 
 	/* cpuinfo and default policy values */
-	policy->cpuinfo.min_freq = min_fsb * fid * 100;
-	policy->cpuinfo.max_freq = max_fsb * fid * 100;
+	policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
+	policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	policy->cur = nforce2_get(policy->cpu);
-	policy->min = policy->cpuinfo.min_freq;
-	policy->max = policy->cpuinfo.max_freq;
 
 	return 0;
 }
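For notifier consumers nothing changes with this conversion: the core now fills freqs->cpu itself while fanning the call out over policy->cpus, so a transition listener is still invoked once per affected CPU. A minimal consumer might look like the sketch below; my_rescale() is hypothetical.

/* Minimal sketch of a transition-notifier consumer. */
static int example_cpufreq_notifier(struct notifier_block *nb,
				    unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	/* called once per CPU in the policy, for both PRE and POST */
	if (state == CPUFREQ_POSTCHANGE)
		my_rescale(freqs->cpu, freqs->old, freqs->new);	/* hypothetical */

	return NOTIFY_OK;
}

/* registered with cpufreq_register_notifier(&nb, CPUFREQ_TRANSITION_NOTIFIER) */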
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b02824d..1b8a48e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -45,7 +45,7 @@
 /* This one keeps track of the previously set governor of a removed CPU */
 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
-static DEFINE_SPINLOCK(cpufreq_driver_lock);
+static DEFINE_RWLOCK(cpufreq_driver_lock);
 
 /*
  * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
@@ -128,6 +128,11 @@
 static LIST_HEAD(cpufreq_governor_list);
 static DEFINE_MUTEX(cpufreq_governor_mutex);
 
+bool have_governor_per_policy(void)
+{
+	return cpufreq_driver->have_governor_per_policy;
+}
+
 static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
 {
 	struct cpufreq_policy *data;
@@ -137,7 +142,7 @@
 		goto err_out;
 
 	/* get the cpufreq driver */
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	if (!cpufreq_driver)
 		goto err_out_unlock;
@@ -155,13 +160,13 @@
 	if (!sysfs && !kobject_get(&data->kobj))
 		goto err_out_put_module;
 
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	return data;
 
 err_out_put_module:
 	module_put(cpufreq_driver->owner);
 err_out_unlock:
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 err_out:
 	return NULL;
 }
@@ -244,19 +249,9 @@
 #endif
 
 
-/**
- * cpufreq_notify_transition - call notifier chain and adjust_jiffies
- * on frequency transition.
- *
- * This function calls the transition notifiers and the "adjust_jiffies"
- * function. It is called twice on all CPU frequency changes that have
- * external effects.
- */
-void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
+void __cpufreq_notify_transition(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, unsigned int state)
 {
-	struct cpufreq_policy *policy;
-	unsigned long flags;
-
 	BUG_ON(irqs_disabled());
 
 	if (cpufreq_disabled())
@@ -266,10 +261,6 @@
 	pr_debug("notification %u of frequency transition to %u kHz\n",
 		state, freqs->new);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
 	switch (state) {
 
 	case CPUFREQ_PRECHANGE:
@@ -303,6 +294,20 @@
 		break;
 	}
 }
+/**
+ * cpufreq_notify_transition - call notifier chain and adjust_jiffies
+ * on frequency transition.
+ *
+ * This function calls the transition notifiers and the "adjust_jiffies"
+ * function. It is called twice on all CPU frequency changes that have
+ * external effects.
+ */
+void cpufreq_notify_transition(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, unsigned int state)
+{
+	for_each_cpu(freqs->cpu, policy->cpus)
+		__cpufreq_notify_transition(policy, freqs, state);
+}
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
 
@@ -765,12 +770,12 @@
 			goto err_out_kobj_put;
 	}
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
 	}
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	ret = cpufreq_add_dev_symlink(cpu, policy);
 	if (ret)
@@ -803,27 +808,30 @@
 				  struct device *dev)
 {
 	struct cpufreq_policy *policy;
-	int ret = 0;
+	int ret = 0, has_target = !!cpufreq_driver->target;
 	unsigned long flags;
 
 	policy = cpufreq_cpu_get(sibling);
 	WARN_ON(!policy);
 
-	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+	if (has_target)
+		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 
 	lock_policy_rwsem_write(sibling);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	cpumask_set_cpu(cpu, policy->cpus);
 	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
 	per_cpu(cpufreq_cpu_data, cpu) = policy;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	unlock_policy_rwsem_write(sibling);
 
-	__cpufreq_governor(policy, CPUFREQ_GOV_START);
-	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	if (has_target) {
+		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+	}
 
 	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 	if (ret) {
@@ -871,15 +879,15 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 	/* Check if this cpu was hot-unplugged earlier and has siblings */
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_online_cpu(sibling) {
 		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
 		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 			return cpufreq_add_policy_cpu(cpu, sibling, dev);
 		}
 	}
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 #endif
 
@@ -952,10 +960,10 @@
 	return 0;
 
 err_out_unregister:
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
@@ -1008,12 +1016,12 @@
 
 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	data = per_cpu(cpufreq_cpu_data, cpu);
 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (!data) {
 		pr_debug("%s: No cpu_data found\n", __func__);
@@ -1031,7 +1039,9 @@
 
 	WARN_ON(lock_policy_rwsem_write(cpu));
 	cpus = cpumask_weight(data->cpus);
-	cpumask_clear_cpu(cpu, data->cpus);
+
+	if (cpus > 1)
+		cpumask_clear_cpu(cpu, data->cpus);
 	unlock_policy_rwsem_write(cpu);
 
 	if (cpu != data->cpu) {
@@ -1047,9 +1057,9 @@
 			WARN_ON(lock_policy_rwsem_write(cpu));
 			cpumask_set_cpu(cpu, data->cpus);
 
-			spin_lock_irqsave(&cpufreq_driver_lock, flags);
+			write_lock_irqsave(&cpufreq_driver_lock, flags);
 			per_cpu(cpufreq_cpu_data, cpu) = data;
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 			unlock_policy_rwsem_write(cpu);
 
@@ -1070,6 +1080,9 @@
 
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
+		if (cpufreq_driver->target)
+			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
 		lock_policy_rwsem_read(cpu);
 		kobj = &data->kobj;
 		cmp = &data->kobj_unregister;
@@ -1134,16 +1147,23 @@
 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 				unsigned int new_freq)
 {
+	struct cpufreq_policy *policy;
 	struct cpufreq_freqs freqs;
+	unsigned long flags;
+
 
 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
 	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
 
-	freqs.cpu = cpu;
 	freqs.old = old_freq;
 	freqs.new = new_freq;
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	read_lock_irqsave(&cpufreq_driver_lock, flags);
+	policy = per_cpu(cpufreq_cpu_data, cpu);
+	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 }
 
 
@@ -1544,10 +1564,12 @@
 						policy->cpu, event);
 	ret = policy->governor->governor(policy, event);
 
-	if (event == CPUFREQ_GOV_START)
-		policy->governor->initialized++;
-	else if (event == CPUFREQ_GOV_STOP)
-		policy->governor->initialized--;
+	if (!ret) {
+		if (event == CPUFREQ_GOV_POLICY_INIT)
+			policy->governor->initialized++;
+		else if (event == CPUFREQ_GOV_POLICY_EXIT)
+			policy->governor->initialized--;
+	}
 
 	/* we keep one module reference alive for
 			each CPU governed by this CPU */
@@ -1651,7 +1673,7 @@
 static int __cpufreq_set_policy(struct cpufreq_policy *data,
 				struct cpufreq_policy *policy)
 {
-	int ret = 0;
+	int ret = 0, failed = 1;
 
 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
 		policy->min, policy->max);
@@ -1705,18 +1727,31 @@
 			pr_debug("governor switch\n");
 
 			/* end old governor */
-			if (data->governor)
+			if (data->governor) {
 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+				__cpufreq_governor(data,
+						CPUFREQ_GOV_POLICY_EXIT);
+			}
 
 			/* start new governor */
 			data->governor = policy->governor;
-			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
+			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
+				if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+					failed = 0;
+				else
+					__cpufreq_governor(data,
+							CPUFREQ_GOV_POLICY_EXIT);
+			}
+
+			if (failed) {
 				/* new governor failed, so re-start old one */
 				pr_debug("starting governor %s failed\n",
 							data->governor->name);
 				if (old_gov) {
 					data->governor = old_gov;
 					__cpufreq_governor(data,
+							CPUFREQ_GOV_POLICY_INIT);
+					__cpufreq_governor(data,
 							   CPUFREQ_GOV_START);
 				}
 				ret = -EINVAL;
@@ -1848,13 +1883,13 @@
 	if (driver_data->setpolicy)
 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	if (cpufreq_driver) {
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		return -EBUSY;
 	}
 	cpufreq_driver = driver_data;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	ret = subsys_interface_register(&cpufreq_interface);
 	if (ret)
@@ -1886,9 +1921,9 @@
 err_if_unreg:
 	subsys_interface_unregister(&cpufreq_interface);
 err_null_driver:
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
@@ -1914,9 +1949,9 @@
 	subsys_interface_unregister(&cpufreq_interface);
 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
-	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	return 0;
 }
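Taken together, the cpufreq.c changes turn cpufreq_driver_lock into an rwlock so the hot lookup paths can run concurrently, and split governor setup into a per-policy allocation phase (POLICY_INIT/POLICY_EXIT) around the existing START/STOP/LIMITS events. A condensed sketch of the new governor-switch sequence in __cpufreq_set_policy(), leaving out the fall-back to the old governor on failure:

/*
 * Condensed sketch of the expanded governor life cycle: tuners are
 * allocated once per policy at POLICY_INIT and freed at POLICY_EXIT;
 * START/STOP only arm and disarm the sampling work.
 */
static int example_switch_governor(struct cpufreq_policy *data,
				   struct cpufreq_governor *new_gov)
{
	if (data->governor) {
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
	}

	data->governor = new_gov;

	if (__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT))
		return -EINVAL;

	if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
		return -EINVAL;
	}

	return __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
}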
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4fd0006..0ceb2ef 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/notifier.h>
 #include <linux/percpu-defs.h>
+#include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
 
@@ -28,25 +29,29 @@
 /* Conservative governor macros */
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
+#define DEF_FREQUENCY_STEP			(5)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
 #define MAX_SAMPLING_DOWN_FACTOR		(10)
 
-static struct dbs_data cs_dbs_data;
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 
-static struct cs_dbs_tuners cs_tuners = {
-	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
-	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
-	.ignore_nice = 0,
-	.freq_step = 5,
-};
+static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
+					   struct cpufreq_policy *policy)
+{
+	unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
+
+	/* max freq cannot be less than 100. But who knows... */
+	if (unlikely(freq_target == 0))
+		freq_target = DEF_FREQUENCY_STEP;
+
+	return freq_target;
+}
 
 /*
  * Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency Every sampling_rate *
- * sampling_down_factor, we check, if current idle time is more than 80%, then
- * we try to decrease frequency
+ * (default), then we try to increase frequency. Every sampling_rate *
+ * sampling_down_factor, we check, if current idle time is more than 80%
+ * (default), then we try to decrease frequency
  *
  * Any frequency increase takes it to the maximum frequency. Frequency reduction
  * happens at minimum steps of 5% (default) of maximum frequency
@@ -55,30 +60,25 @@
 {
 	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
-	unsigned int freq_target;
+	struct dbs_data *dbs_data = policy->governor_data;
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
 	/*
 	 * break out if we 'cannot' reduce the speed as the user might
 	 * want freq_step to be zero
 	 */
-	if (cs_tuners.freq_step == 0)
+	if (cs_tuners->freq_step == 0)
 		return;
 
 	/* Check for frequency increase */
-	if (load > cs_tuners.up_threshold) {
+	if (load > cs_tuners->up_threshold) {
 		dbs_info->down_skip = 0;
 
 		/* if we are already at full speed then break out early */
 		if (dbs_info->requested_freq == policy->max)
 			return;
 
-		freq_target = (cs_tuners.freq_step * policy->max) / 100;
-
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_target == 0))
-			freq_target = 5;
-
-		dbs_info->requested_freq += freq_target;
+		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
 		if (dbs_info->requested_freq > policy->max)
 			dbs_info->requested_freq = policy->max;
 
@@ -87,45 +87,48 @@
 		return;
 	}
 
-	/*
-	 * The optimal frequency is the frequency that is the lowest that can
-	 * support the current CPU usage without triggering the up policy. To be
-	 * safe, we focus 10 points under the threshold.
-	 */
-	if (load < (cs_tuners.down_threshold - 10)) {
-		freq_target = (cs_tuners.freq_step * policy->max) / 100;
+	/* if sampling_down_factor is active break out early */
+	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
+		return;
+	dbs_info->down_skip = 0;
 
-		dbs_info->requested_freq -= freq_target;
-		if (dbs_info->requested_freq < policy->min)
-			dbs_info->requested_freq = policy->min;
-
+	/* Check for frequency decrease */
+	if (load < cs_tuners->down_threshold) {
 		/*
 		 * if we cannot reduce the frequency anymore, break out early
 		 */
 		if (policy->cur == policy->min)
 			return;
 
+		dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
+		if (dbs_info->requested_freq < policy->min)
+			dbs_info->requested_freq = policy->min;
+
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
-				CPUFREQ_RELATION_H);
+				CPUFREQ_RELATION_L);
 		return;
 	}
 }
 
 static void cs_dbs_timer(struct work_struct *work)
 {
-	struct delayed_work *dw = to_delayed_work(work);
 	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
 			struct cs_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
 	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
 			cpu);
-	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
+	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
+	bool modify_all = true;
 
 	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
-		dbs_check_cpu(&cs_dbs_data, cpu);
+	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
+		modify_all = false;
+	else
+		dbs_check_cpu(dbs_data, cpu);
 
-	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
 	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
@@ -154,16 +157,12 @@
 }
 
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
-{
-	return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
-}
+static struct common_dbs_data cs_dbs_cdata;
 
-static ssize_t store_sampling_down_factor(struct kobject *a,
-					  struct attribute *b,
-					  const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+		const char *buf, size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -171,13 +170,14 @@
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	cs_tuners.sampling_down_factor = input;
+	cs_tuners->sampling_down_factor = input;
 	return count;
 }
 
-static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
-				   const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -185,43 +185,46 @@
 	if (ret != 1)
 		return -EINVAL;
 
-	cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
+	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
 	return count;
 }
 
-static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
-				  const char *buf, size_t count)
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
+	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
 		return -EINVAL;
 
-	cs_tuners.up_threshold = input;
+	cs_tuners->up_threshold = input;
 	return count;
 }
 
-static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
-				    const char *buf, size_t count)
+static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= cs_tuners.up_threshold)
+			input >= cs_tuners->up_threshold)
 		return -EINVAL;
 
-	cs_tuners.down_threshold = input;
+	cs_tuners->down_threshold = input;
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
-				      const char *buf, size_t count)
+static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input, j;
 	int ret;
 
@@ -232,27 +235,28 @@
 	if (input > 1)
 		input = 1;
 
-	if (input == cs_tuners.ignore_nice) /* nothing to do */
+	if (input == cs_tuners->ignore_nice) /* nothing to do */
 		return count;
 
-	cs_tuners.ignore_nice = input;
+	cs_tuners->ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cs_cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
-						&dbs_info->cdbs.prev_cpu_wall);
-		if (cs_tuners.ignore_nice)
+					&dbs_info->cdbs.prev_cpu_wall, 0);
+		if (cs_tuners->ignore_nice)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
 	return count;
 }
 
-static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
-			       const char *buf, size_t count)
+static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -267,43 +271,88 @@
 	 * no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :)
 	 */
-	cs_tuners.freq_step = input;
+	cs_tuners->freq_step = input;
 	return count;
 }
 
-show_one(cs, sampling_rate, sampling_rate);
-show_one(cs, sampling_down_factor, sampling_down_factor);
-show_one(cs, up_threshold, up_threshold);
-show_one(cs, down_threshold, down_threshold);
-show_one(cs, ignore_nice_load, ignore_nice);
-show_one(cs, freq_step, freq_step);
+show_store_one(cs, sampling_rate);
+show_store_one(cs, sampling_down_factor);
+show_store_one(cs, up_threshold);
+show_store_one(cs, down_threshold);
+show_store_one(cs, ignore_nice);
+show_store_one(cs, freq_step);
+declare_show_sampling_rate_min(cs);
 
-define_one_global_rw(sampling_rate);
-define_one_global_rw(sampling_down_factor);
-define_one_global_rw(up_threshold);
-define_one_global_rw(down_threshold);
-define_one_global_rw(ignore_nice_load);
-define_one_global_rw(freq_step);
-define_one_global_ro(sampling_rate_min);
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(down_threshold);
+gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(freq_step);
+gov_sys_pol_attr_ro(sampling_rate_min);
 
-static struct attribute *dbs_attributes[] = {
-	&sampling_rate_min.attr,
-	&sampling_rate.attr,
-	&sampling_down_factor.attr,
-	&up_threshold.attr,
-	&down_threshold.attr,
-	&ignore_nice_load.attr,
-	&freq_step.attr,
+static struct attribute *dbs_attributes_gov_sys[] = {
+	&sampling_rate_min_gov_sys.attr,
+	&sampling_rate_gov_sys.attr,
+	&sampling_down_factor_gov_sys.attr,
+	&up_threshold_gov_sys.attr,
+	&down_threshold_gov_sys.attr,
+	&ignore_nice_gov_sys.attr,
+	&freq_step_gov_sys.attr,
 	NULL
 };
 
-static struct attribute_group cs_attr_group = {
-	.attrs = dbs_attributes,
+static struct attribute_group cs_attr_group_gov_sys = {
+	.attrs = dbs_attributes_gov_sys,
+	.name = "conservative",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+	&sampling_rate_min_gov_pol.attr,
+	&sampling_rate_gov_pol.attr,
+	&sampling_down_factor_gov_pol.attr,
+	&up_threshold_gov_pol.attr,
+	&down_threshold_gov_pol.attr,
+	&ignore_nice_gov_pol.attr,
+	&freq_step_gov_pol.attr,
+	NULL
+};
+
+static struct attribute_group cs_attr_group_gov_pol = {
+	.attrs = dbs_attributes_gov_pol,
 	.name = "conservative",
 };
 
 /************************** sysfs end ************************/
 
+static int cs_init(struct dbs_data *dbs_data)
+{
+	struct cs_dbs_tuners *tuners;
+
+	tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
+	if (!tuners) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
+	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	tuners->ignore_nice = 0;
+	tuners->freq_step = DEF_FREQUENCY_STEP;
+
+	dbs_data->tuners = tuners;
+	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+		jiffies_to_usecs(10);
+	mutex_init(&dbs_data->mutex);
+	return 0;
+}
+
+static void cs_exit(struct dbs_data *dbs_data)
+{
+	kfree(dbs_data->tuners);
+}
+
 define_get_cpu_dbs_routines(cs_cpu_dbs_info);
 
 static struct notifier_block cs_cpufreq_notifier_block = {
@@ -314,21 +363,23 @@
 	.notifier_block = &cs_cpufreq_notifier_block,
 };
 
-static struct dbs_data cs_dbs_data = {
+static struct common_dbs_data cs_dbs_cdata = {
 	.governor = GOV_CONSERVATIVE,
-	.attr_group = &cs_attr_group,
-	.tuners = &cs_tuners,
+	.attr_group_gov_sys = &cs_attr_group_gov_sys,
+	.attr_group_gov_pol = &cs_attr_group_gov_pol,
 	.get_cpu_cdbs = get_cpu_cdbs,
 	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
 	.gov_dbs_timer = cs_dbs_timer,
 	.gov_check_cpu = cs_check_cpu,
 	.gov_ops = &cs_ops,
+	.init = cs_init,
+	.exit = cs_exit,
 };
 
 static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				   unsigned int event)
 {
-	return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
+	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
 }
 
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
@@ -343,7 +394,6 @@
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	mutex_init(&cs_dbs_data.mutex);
 	return cpufreq_register_governor(&cpufreq_gov_conservative);
 }
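With the file-scope cs_tuners gone, every code path in the governor derives its tunables from the policy it is asked about: each policy gets its own dbs_data when the driver sets have_governor_per_policy, otherwise a single shared instance hangs off cs_dbs_cdata.gdbs_data. The access pattern, in isolation (cs_get_freq_step() is a hypothetical helper name):

/* Minimal sketch of the new tuner lookup used throughout the governor. */
static unsigned int cs_get_freq_step(struct cpufreq_policy *policy)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

	return cs_tuners->freq_step;
}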
 
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5a76086..443442d 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -22,12 +22,29 @@
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/tick.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
 #include "cpufreq_governor.h"
 
+static struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
+{
+	if (have_governor_per_policy())
+		return &policy->kobj;
+	else
+		return cpufreq_global_kobject;
+}
+
+static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
+{
+	if (have_governor_per_policy())
+		return dbs_data->cdata->attr_group_gov_pol;
+	else
+		return dbs_data->cdata->attr_group_gov_sys;
+}
+
 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
 	u64 idle_time;
@@ -50,13 +67,13 @@
 	return cputime_to_usecs(idle_time);
 }
 
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall)
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 {
-	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
 
 	if (idle_time == -1ULL)
 		return get_cpu_idle_time_jiffy(cpu, wall);
-	else
+	else if (!io_busy)
 		idle_time += get_cpu_iowait_time_us(cpu, wall);
 
 	return idle_time;
@@ -65,7 +82,7 @@
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
-	struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	struct cpufreq_policy *policy;
@@ -73,7 +90,7 @@
 	unsigned int ignore_nice;
 	unsigned int j;
 
-	if (dbs_data->governor == GOV_ONDEMAND)
+	if (dbs_data->cdata->governor == GOV_ONDEMAND)
 		ignore_nice = od_tuners->ignore_nice;
 	else
 		ignore_nice = cs_tuners->ignore_nice;
@@ -83,13 +100,22 @@
 	/* Get Absolute Load (in terms of freq for ondemand gov) */
 	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_common_info *j_cdbs;
-		u64 cur_wall_time, cur_idle_time, cur_iowait_time;
-		unsigned int idle_time, wall_time, iowait_time;
+		u64 cur_wall_time, cur_idle_time;
+		unsigned int idle_time, wall_time;
 		unsigned int load;
+		int io_busy = 0;
 
-		j_cdbs = dbs_data->get_cpu_cdbs(j);
+		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);
 
-		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+		/*
+		 * For the purpose of ondemand, waiting for disk IO is
+		 * an indication that you're performance critical, and
+		 * not that the system is actually idle. So do not add
+		 * the iowait time to the cpu idle time.
+		 */
+		if (dbs_data->cdata->governor == GOV_ONDEMAND)
+			io_busy = od_tuners->io_is_busy;
+		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
 
 		wall_time = (unsigned int)
 			(cur_wall_time - j_cdbs->prev_cpu_wall);
@@ -117,35 +143,12 @@
 			idle_time += jiffies_to_usecs(cur_nice_jiffies);
 		}
 
-		if (dbs_data->governor == GOV_ONDEMAND) {
-			struct od_cpu_dbs_info_s *od_j_dbs_info =
-				dbs_data->get_cpu_dbs_info_s(cpu);
-
-			cur_iowait_time = get_cpu_iowait_time_us(j,
-					&cur_wall_time);
-			if (cur_iowait_time == -1ULL)
-				cur_iowait_time = 0;
-
-			iowait_time = (unsigned int) (cur_iowait_time -
-					od_j_dbs_info->prev_cpu_iowait);
-			od_j_dbs_info->prev_cpu_iowait = cur_iowait_time;
-
-			/*
-			 * For the purpose of ondemand, waiting for disk IO is
-			 * an indication that you're performance critical, and
-			 * not that the system is actually idle. So subtract the
-			 * iowait time from the cpu idle time.
-			 */
-			if (od_tuners->io_is_busy && idle_time >= iowait_time)
-				idle_time -= iowait_time;
-		}
-
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
 		load = 100 * (wall_time - idle_time) / wall_time;
 
-		if (dbs_data->governor == GOV_ONDEMAND) {
+		if (dbs_data->cdata->governor == GOV_ONDEMAND) {
 			int freq_avg = __cpufreq_driver_getavg(policy, j);
 			if (freq_avg <= 0)
 				freq_avg = policy->cur;
@@ -157,24 +160,42 @@
 			max_load = load;
 	}
 
-	dbs_data->gov_check_cpu(cpu, max_load);
+	dbs_data->cdata->gov_check_cpu(cpu, max_load);
 }
 EXPORT_SYMBOL_GPL(dbs_check_cpu);
 
-static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
-				  unsigned int sampling_rate)
+static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
+		unsigned int delay)
 {
-	int delay = delay_for_sampling_rate(sampling_rate);
-	struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-	schedule_delayed_work_on(cpu, &cdbs->work, delay);
+	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
 }
 
-static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+		unsigned int delay, bool all_cpus)
 {
-	struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+	int i;
 
-	cancel_delayed_work_sync(&cdbs->work);
+	if (!all_cpus) {
+		__gov_queue_work(smp_processor_id(), dbs_data, delay);
+	} else {
+		for_each_cpu(i, policy->cpus)
+			__gov_queue_work(i, dbs_data, delay);
+	}
+}
+EXPORT_SYMBOL_GPL(gov_queue_work);
+
+static inline void gov_cancel_work(struct dbs_data *dbs_data,
+		struct cpufreq_policy *policy)
+{
+	struct cpu_dbs_common_info *cdbs;
+	int i;
+
+	for_each_cpu(i, policy->cpus) {
+		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
+		cancel_delayed_work_sync(&cdbs->work);
+	}
 }
 
 /* Will return if we need to evaluate cpu load again or not */
@@ -196,31 +217,130 @@
 }
 EXPORT_SYMBOL_GPL(need_load_eval);
 
-int cpufreq_governor_dbs(struct dbs_data *dbs_data,
-		struct cpufreq_policy *policy, unsigned int event)
+static void set_sampling_rate(struct dbs_data *dbs_data,
+		unsigned int sampling_rate)
 {
+	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+		cs_tuners->sampling_rate = sampling_rate;
+	} else {
+		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+		od_tuners->sampling_rate = sampling_rate;
+	}
+}
+
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+		struct common_dbs_data *cdata, unsigned int event)
+{
+	struct dbs_data *dbs_data;
 	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
 	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-	struct cs_ops *cs_ops = NULL;
 	struct od_ops *od_ops = NULL;
-	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+	struct od_dbs_tuners *od_tuners = NULL;
+	struct cs_dbs_tuners *cs_tuners = NULL;
 	struct cpu_dbs_common_info *cpu_cdbs;
-	unsigned int *sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
+	unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
+	int io_busy = 0;
 	int rc;
 
-	cpu_cdbs = dbs_data->get_cpu_cdbs(cpu);
+	if (have_governor_per_policy())
+		dbs_data = policy->governor_data;
+	else
+		dbs_data = cdata->gdbs_data;
 
-	if (dbs_data->governor == GOV_CONSERVATIVE) {
-		cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
-		sampling_rate = &cs_tuners->sampling_rate;
+	WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));
+
+	switch (event) {
+	case CPUFREQ_GOV_POLICY_INIT:
+		if (have_governor_per_policy()) {
+			WARN_ON(dbs_data);
+		} else if (dbs_data) {
+			policy->governor_data = dbs_data;
+			return 0;
+		}
+
+		dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
+		if (!dbs_data) {
+			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+			return -ENOMEM;
+		}
+
+		dbs_data->cdata = cdata;
+		rc = cdata->init(dbs_data);
+		if (rc) {
+			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
+			kfree(dbs_data);
+			return rc;
+		}
+
+		rc = sysfs_create_group(get_governor_parent_kobj(policy),
+				get_sysfs_attr(dbs_data));
+		if (rc) {
+			cdata->exit(dbs_data);
+			kfree(dbs_data);
+			return rc;
+		}
+
+		policy->governor_data = dbs_data;
+
+		/* policy latency is in nS. Convert it to uS first */
+		latency = policy->cpuinfo.transition_latency / 1000;
+		if (latency == 0)
+			latency = 1;
+
+		/* Bring kernel and HW constraints together */
+		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
+				MIN_LATENCY_MULTIPLIER * latency);
+		set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
+					latency * LATENCY_MULTIPLIER));
+
+		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+
+			cpufreq_register_notifier(cs_ops->notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		}
+
+		if (!have_governor_per_policy())
+			cdata->gdbs_data = dbs_data;
+
+		return 0;
+	case CPUFREQ_GOV_POLICY_EXIT:
+		if ((policy->governor->initialized == 1) ||
+				have_governor_per_policy()) {
+			sysfs_remove_group(get_governor_parent_kobj(policy),
+					get_sysfs_attr(dbs_data));
+
+			if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
+
+				cpufreq_unregister_notifier(cs_ops->notifier_block,
+						CPUFREQ_TRANSITION_NOTIFIER);
+			}
+
+			cdata->exit(dbs_data);
+			kfree(dbs_data);
+			cdata->gdbs_data = NULL;
+		}
+
+		policy->governor_data = NULL;
+		return 0;
+	}
+
+	cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+
+	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+		cs_tuners = dbs_data->tuners;
+		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+		sampling_rate = cs_tuners->sampling_rate;
 		ignore_nice = cs_tuners->ignore_nice;
-		cs_ops = dbs_data->gov_ops;
 	} else {
-		od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
-		sampling_rate = &od_tuners->sampling_rate;
+		od_tuners = dbs_data->tuners;
+		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+		sampling_rate = od_tuners->sampling_rate;
 		ignore_nice = od_tuners->ignore_nice;
-		od_ops = dbs_data->gov_ops;
+		od_ops = dbs_data->cdata->gov_ops;
+		io_busy = od_tuners->io_is_busy;
 	}
 
 	switch (event) {
@@ -232,96 +352,53 @@
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_common_info *j_cdbs =
-				dbs_data->get_cpu_cdbs(j);
+				dbs_data->cdata->get_cpu_cdbs(j);
 
 			j_cdbs->cpu = j;
 			j_cdbs->cur_policy = policy;
 			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
-					&j_cdbs->prev_cpu_wall);
+					       &j_cdbs->prev_cpu_wall, io_busy);
 			if (ignore_nice)
 				j_cdbs->prev_cpu_nice =
 					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
 			mutex_init(&j_cdbs->timer_mutex);
 			INIT_DEFERRABLE_WORK(&j_cdbs->work,
-					     dbs_data->gov_dbs_timer);
-		}
-
-		if (!policy->governor->initialized) {
-			rc = sysfs_create_group(cpufreq_global_kobject,
-					dbs_data->attr_group);
-			if (rc) {
-				mutex_unlock(&dbs_data->mutex);
-				return rc;
-			}
+					     dbs_data->cdata->gov_dbs_timer);
 		}
 
 		/*
 		 * conservative does not implement micro like ondemand
 		 * governor, thus we are bound to jiffes/HZ
 		 */
-		if (dbs_data->governor == GOV_CONSERVATIVE) {
+		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
 			cs_dbs_info->down_skip = 0;
 			cs_dbs_info->enable = 1;
 			cs_dbs_info->requested_freq = policy->cur;
-
-			if (!policy->governor->initialized) {
-				cpufreq_register_notifier(cs_ops->notifier_block,
-						CPUFREQ_TRANSITION_NOTIFIER);
-
-				dbs_data->min_sampling_rate =
-					MIN_SAMPLING_RATE_RATIO *
-					jiffies_to_usecs(10);
-			}
 		} else {
 			od_dbs_info->rate_mult = 1;
 			od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
 			od_ops->powersave_bias_init_cpu(cpu);
-
-			if (!policy->governor->initialized)
-				od_tuners->io_is_busy = od_ops->io_busy();
 		}
 
-		if (policy->governor->initialized)
-			goto unlock;
-
-		/* policy latency is in nS. Convert it to uS first */
-		latency = policy->cpuinfo.transition_latency / 1000;
-		if (latency == 0)
-			latency = 1;
-
-		/* Bring kernel and HW constraints together */
-		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
-				MIN_LATENCY_MULTIPLIER * latency);
-		*sampling_rate = max(dbs_data->min_sampling_rate, latency *
-				LATENCY_MULTIPLIER);
-unlock:
 		mutex_unlock(&dbs_data->mutex);
 
 		/* Initiate timer time stamp */
 		cpu_cdbs->time_stamp = ktime_get();
 
-		for_each_cpu(j, policy->cpus)
-			dbs_timer_init(dbs_data, j, *sampling_rate);
+		gov_queue_work(dbs_data, policy,
+				delay_for_sampling_rate(sampling_rate), true);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		if (dbs_data->governor == GOV_CONSERVATIVE)
+		if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
 			cs_dbs_info->enable = 0;
 
-		for_each_cpu(j, policy->cpus)
-			dbs_timer_exit(dbs_data, j);
+		gov_cancel_work(dbs_data, policy);
 
 		mutex_lock(&dbs_data->mutex);
 		mutex_destroy(&cpu_cdbs->timer_mutex);
 
-		if (policy->governor->initialized == 1) {
-			sysfs_remove_group(cpufreq_global_kobject,
-					dbs_data->attr_group);
-			if (dbs_data->governor == GOV_CONSERVATIVE)
-				cpufreq_unregister_notifier(cs_ops->notifier_block,
-						CPUFREQ_TRANSITION_NOTIFIER);
-		}
 		mutex_unlock(&dbs_data->mutex);
 
 		break;
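The io_busy parameter added to get_cpu_idle_time() replaces ondemand's private iowait bookkeeping: when set, iowait time is excluded from the reported idle time (i.e. counted as load), while conservative passes 0 and keeps the old accounting. The per-CPU load calculation then reduces to roughly the sketch below; compute_load() is a hypothetical helper mirroring the loop body of dbs_check_cpu().

/* Rough sketch of the per-CPU load computation in dbs_check_cpu(). */
static unsigned int compute_load(struct cpu_dbs_common_info *j_cdbs,
				 unsigned int cpu, int io_busy)
{
	u64 cur_wall_time, cur_idle_time;
	unsigned int wall_time, idle_time;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, io_busy);

	wall_time = (unsigned int)(cur_wall_time - j_cdbs->prev_cpu_wall);
	j_cdbs->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int)(cur_idle_time - j_cdbs->prev_cpu_idle);
	j_cdbs->prev_cpu_idle = cur_idle_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	return 100 * (wall_time - idle_time) / wall_time;
}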
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index cc4bd2f..8ac3353 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -34,20 +34,81 @@
  */
 #define MIN_SAMPLING_RATE_RATIO			(2)
 #define LATENCY_MULTIPLIER			(1000)
-#define MIN_LATENCY_MULTIPLIER			(100)
+#define MIN_LATENCY_MULTIPLIER			(20)
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
 
 /* Ondemand Sampling types */
 enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
 
-/* Macro creating sysfs show routines */
-#define show_one(_gov, file_name, object)				\
-static ssize_t show_##file_name						\
+/*
+ * Macro for creating governors sysfs routines
+ *
+ * - gov_sys: One governor instance per whole system
+ * - gov_pol: One governor instance per policy
+ */
+
+/* Create attributes */
+#define gov_sys_attr_ro(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
+
+#define gov_sys_attr_rw(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_ro(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0444, show_##_name##_gov_pol, NULL)
+
+#define gov_pol_attr_rw(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)					\
+	gov_sys_attr_rw(_name);						\
+	gov_pol_attr_rw(_name)
+
+#define gov_sys_pol_attr_ro(_name)					\
+	gov_sys_attr_ro(_name);						\
+	gov_pol_attr_ro(_name)
+
+/* Create show/store routines */
+#define show_one(_gov, file_name)					\
+static ssize_t show_##file_name##_gov_sys				\
 (struct kobject *kobj, struct attribute *attr, char *buf)		\
 {									\
-	return sprintf(buf, "%u\n", _gov##_tuners.object);		\
+	struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
+	return sprintf(buf, "%u\n", tuners->file_name);			\
+}									\
+									\
+static ssize_t show_##file_name##_gov_pol					\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	struct dbs_data *dbs_data = policy->governor_data;		\
+	struct _gov##_dbs_tuners *tuners = dbs_data->tuners;		\
+	return sprintf(buf, "%u\n", tuners->file_name);			\
 }
 
+#define store_one(_gov, file_name)					\
+static ssize_t store_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)	\
+{									\
+	struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data;		\
+	return store_##file_name(dbs_data, buf, count);			\
+}									\
+									\
+static ssize_t store_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+{									\
+	struct dbs_data *dbs_data = policy->governor_data;		\
+	return store_##file_name(dbs_data, buf, count);			\
+}
+
+#define show_store_one(_gov, file_name)					\
+show_one(_gov, file_name);						\
+store_one(_gov, file_name)
+
+/* create helper routines */
 #define define_get_cpu_dbs_routines(_dbs_info)				\
 static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)		\
 {									\
@@ -87,7 +148,6 @@
 
 struct od_cpu_dbs_info_s {
 	struct cpu_dbs_common_info cdbs;
-	u64 prev_cpu_iowait;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
@@ -103,7 +163,7 @@
 	unsigned int enable:1;
 };
 
-/* Governers sysfs tunables */
+/* Per-policy governor sysfs tunables */
 struct od_dbs_tuners {
 	unsigned int ignore_nice;
 	unsigned int sampling_rate;
@@ -123,31 +183,42 @@
 	unsigned int freq_step;
 };
 
-/* Per Governer data */
-struct dbs_data {
+/* Common governor data across policies */
+struct dbs_data;
+struct common_dbs_data {
 	/* Common across governors */
 	#define GOV_ONDEMAND		0
 	#define GOV_CONSERVATIVE	1
 	int governor;
-	unsigned int min_sampling_rate;
-	struct attribute_group *attr_group;
-	void *tuners;
+	struct attribute_group *attr_group_gov_sys; /* one governor - system */
+	struct attribute_group *attr_group_gov_pol; /* one governor - policy */
 
-	/* dbs_mutex protects dbs_enable in governor start/stop */
-	struct mutex mutex;
+	/* Common data for platforms that don't set have_governor_per_policy */
+	struct dbs_data *gdbs_data;
 
 	struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
 	void *(*get_cpu_dbs_info_s)(int cpu);
 	void (*gov_dbs_timer)(struct work_struct *work);
 	void (*gov_check_cpu)(int cpu, unsigned int load);
+	int (*init)(struct dbs_data *dbs_data);
+	void (*exit)(struct dbs_data *dbs_data);
 
 	/* Governor specific ops, see below */
 	void *gov_ops;
 };
 
+/* Per-policy governor data */
+struct dbs_data {
+	struct common_dbs_data *cdata;
+	unsigned int min_sampling_rate;
+	void *tuners;
+
+	/* dbs_mutex protects dbs_enable in governor start/stop */
+	struct mutex mutex;
+};
+
 /* Governor specific ops, will be passed to dbs_data->gov_ops */
 struct od_ops {
-	int (*io_busy)(void);
 	void (*powersave_bias_init_cpu)(int cpu);
 	unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
 			unsigned int freq_next, unsigned int relation);
@@ -169,10 +240,31 @@
 	return delay;
 }
 
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
+#define declare_show_sampling_rate_min(_gov)				\
+static ssize_t show_sampling_rate_min_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
+{									\
+	struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data;		\
+	return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);	\
+}									\
+									\
+static ssize_t show_sampling_rate_min_gov_pol				\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	struct dbs_data *dbs_data = policy->governor_data;		\
+	return sprintf(buf, "%u\n", dbs_data->min_sampling_rate);	\
+}
+
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
 bool need_load_eval(struct cpu_dbs_common_info *cdbs,
 		unsigned int sampling_rate);
-int cpufreq_governor_dbs(struct dbs_data *dbs_data,
-		struct cpufreq_policy *policy, unsigned int event);
+int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+		struct common_dbs_data *cdata, unsigned int event);
+void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
+		unsigned int delay, bool all_cpus);
+void od_register_powersave_bias_handler(unsigned int (*f)
+		(struct cpufreq_policy *, unsigned int, unsigned int),
+		unsigned int powersave_bias);
+void od_unregister_powersave_bias_handler(void);
 #endif /* _CPUFREQ_GOVERNOR_H */
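For reference, show_store_one(cs, freq_step) together with gov_sys_pol_attr_rw(freq_step) expands, roughly, to two attribute pairs: a *_gov_sys variant rooted at the global kobject and a *_gov_pol variant rooted at the policy's kobject, both funnelling into the same show/store helpers that take a struct dbs_data. An abbreviated expansion of the per-policy half is sketched below; the gov_sys half is analogous but reads cs_dbs_cdata.gdbs_data and takes kobject arguments.

/* Abbreviated expansion sketch: the per-policy (gov_pol) half for freq_step. */
static ssize_t show_freq_step_gov_pol(struct cpufreq_policy *policy, char *buf)
{
	struct dbs_data *dbs_data = policy->governor_data;
	struct cs_dbs_tuners *tuners = dbs_data->tuners;

	return sprintf(buf, "%u\n", tuners->freq_step);
}

static ssize_t store_freq_step_gov_pol(struct cpufreq_policy *policy,
				       const char *buf, size_t count)
{
	struct dbs_data *dbs_data = policy->governor_data;

	return store_freq_step(dbs_data, buf, count);
}

static struct freq_attr freq_step_gov_pol =
	__ATTR(freq_step, 0644, show_freq_step_gov_pol, store_freq_step_gov_pol);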
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f3eb26c..b0ffef9 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -20,9 +20,11 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/percpu-defs.h>
+#include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/tick.h>
 #include <linux/types.h>
+#include <linux/cpu.h>
 
 #include "cpufreq_governor.h"
 
@@ -37,22 +39,14 @@
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
-static struct dbs_data od_dbs_data;
 static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
 
+static struct od_ops od_ops;
+
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
 static struct cpufreq_governor cpufreq_gov_ondemand;
 #endif
 
-static struct od_dbs_tuners od_tuners = {
-	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
-	.adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-			    DEF_FREQUENCY_DOWN_DIFFERENTIAL,
-	.ignore_nice = 0,
-	.powersave_bias = 0,
-};
-
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -89,7 +83,7 @@
  * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
  * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
  */
-static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
+static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
 		unsigned int freq_next, unsigned int relation)
 {
 	unsigned int freq_req, freq_reduc, freq_avg;
@@ -98,6 +92,8 @@
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
 						   policy->cpu);
+	struct dbs_data *dbs_data = policy->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -108,7 +104,7 @@
 	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
 			relation, &index);
 	freq_req = dbs_info->freq_table[index].frequency;
-	freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
+	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
 	freq_avg = freq_req - freq_reduc;
 
 	/* Find freq bounds for freq_avg in freq_table */
@@ -127,7 +123,7 @@
 		dbs_info->freq_lo_jiffies = 0;
 		return freq_lo;
 	}
-	jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
+	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
 	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
 	jiffies_hi += ((freq_hi - freq_lo) / 2);
 	jiffies_hi /= (freq_hi - freq_lo);
@@ -148,12 +144,16 @@
 
 static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
 {
-	if (od_tuners.powersave_bias)
-		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
+	struct dbs_data *dbs_data = p->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+	if (od_tuners->powersave_bias)
+		freq = od_ops.powersave_bias_target(p, freq,
+				CPUFREQ_RELATION_H);
 	else if (p->cur == p->max)
 		return;
 
-	__cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
+	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
 			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
 }
 
@@ -170,15 +170,17 @@
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct dbs_data *dbs_data = policy->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
 	dbs_info->freq_lo = 0;
 
 	/* Check for frequency increase */
-	if (load_freq > od_tuners.up_threshold * policy->cur) {
+	if (load_freq > od_tuners->up_threshold * policy->cur) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
 			dbs_info->rate_mult =
-				od_tuners.sampling_down_factor;
+				od_tuners->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 		return;
 	}
@@ -193,9 +195,10 @@
 	 * support the current CPU usage without triggering the up policy. To be
 	 * safe, we focus 10 points under the threshold.
 	 */
-	if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
+	if (load_freq < od_tuners->adj_up_threshold
+			* policy->cur) {
 		unsigned int freq_next;
-		freq_next = load_freq / od_tuners.adj_up_threshold;
+		freq_next = load_freq / od_tuners->adj_up_threshold;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
@@ -203,65 +206,62 @@
 		if (freq_next < policy->min)
 			freq_next = policy->min;
 
-		if (!od_tuners.powersave_bias) {
+		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
-		} else {
-			int freq = powersave_bias_target(policy, freq_next,
-					CPUFREQ_RELATION_L);
-			__cpufreq_driver_target(policy, freq,
-					CPUFREQ_RELATION_L);
+			return;
 		}
+
+		freq_next = od_ops.powersave_bias_target(policy, freq_next,
+					CPUFREQ_RELATION_L);
+		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 	}
 }
 
 static void od_dbs_timer(struct work_struct *work)
 {
-	struct delayed_work *dw = to_delayed_work(work);
 	struct od_cpu_dbs_info_s *dbs_info =
 		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
 	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
 			cpu);
-	int delay, sample_type = core_dbs_info->sample_type;
-	bool eval_load;
+	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+	int delay = 0, sample_type = core_dbs_info->sample_type;
+	bool modify_all = true;
 
 	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	eval_load = need_load_eval(&core_dbs_info->cdbs,
-			od_tuners.sampling_rate);
+	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
+		modify_all = false;
+		goto max_delay;
+	}
 
 	/* Common NORMAL_SAMPLE setup */
 	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
 		delay = core_dbs_info->freq_lo_jiffies;
-		if (eval_load)
-			__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
-						core_dbs_info->freq_lo,
-						CPUFREQ_RELATION_H);
+		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
+				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	} else {
-		if (eval_load)
-			dbs_check_cpu(&od_dbs_data, cpu);
+		dbs_check_cpu(dbs_data, cpu);
 		if (core_dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
 			core_dbs_info->sample_type = OD_SUB_SAMPLE;
 			delay = core_dbs_info->freq_hi_jiffies;
-		} else {
-			delay = delay_for_sampling_rate(od_tuners.sampling_rate
-						* core_dbs_info->rate_mult);
 		}
 	}
 
-	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+max_delay:
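+	/* Fall back to the sampling rate scaled by rate_mult */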
+	if (!delay)
+		delay = delay_for_sampling_rate(od_tuners->sampling_rate
+				* core_dbs_info->rate_mult);
+
+	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
 	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 /************************** sysfs interface ************************/
-
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
-{
-	return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
-}
+static struct common_dbs_data od_dbs_cdata;
 
 /**
  * update_sampling_rate - update sampling rate effective immediately if needed.
@@ -276,12 +276,14 @@
  * reducing the sampling rate, we need to make the new value effective
  * immediately.
  */
-static void update_sampling_rate(unsigned int new_rate)
+static void update_sampling_rate(struct dbs_data *dbs_data,
+		unsigned int new_rate)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	int cpu;
 
-	od_tuners.sampling_rate = new_rate = max(new_rate,
-			od_dbs_data.min_sampling_rate);
+	od_tuners->sampling_rate = new_rate = max(new_rate,
+			dbs_data->min_sampling_rate);
 
 	for_each_online_cpu(cpu) {
 		struct cpufreq_policy *policy;
@@ -314,42 +316,54 @@
 			cancel_delayed_work_sync(&dbs_info->cdbs.work);
 			mutex_lock(&dbs_info->cdbs.timer_mutex);
 
-			schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
-					usecs_to_jiffies(new_rate));
+			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
+					usecs_to_jiffies(new_rate), true);
 
 		}
 		mutex_unlock(&dbs_info->cdbs.timer_mutex);
 	}
 }
 
-static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
-				   const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-	update_sampling_rate(input);
+
+	update_sampling_rate(dbs_data, input);
 	return count;
 }
 
-static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
-				   const char *buf, size_t count)
+static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
+	unsigned int j;
 
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-	od_tuners.io_is_busy = !!input;
+	od_tuners->io_is_busy = !!input;
+
+	/* we need to re-evaluate prev_cpu_idle */
+	for_each_online_cpu(j) {
+		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+									j);
+		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
+	}
 	return count;
 }
 
-static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
-				  const char *buf, size_t count)
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -359,23 +373,24 @@
 		return -EINVAL;
 	}
 	/* Calculate the new adj_up_threshold */
-	od_tuners.adj_up_threshold += input;
-	od_tuners.adj_up_threshold -= od_tuners.up_threshold;
+	od_tuners->adj_up_threshold += input;
+	od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
-	od_tuners.up_threshold = input;
+	od_tuners->up_threshold = input;
 	return count;
 }
 
-static ssize_t store_sampling_down_factor(struct kobject *a,
-			struct attribute *b, const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+		const char *buf, size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input, j;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
-	od_tuners.sampling_down_factor = input;
+	od_tuners->sampling_down_factor = input;
 
 	/* Reset down sampling multiplier in case it was active */
 	for_each_online_cpu(j) {
@@ -386,9 +401,10 @@
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
-				      const char *buf, size_t count)
+static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 
@@ -401,18 +417,18 @@
 	if (input > 1)
 		input = 1;
 
-	if (input == od_tuners.ignore_nice) { /* nothing to do */
+	if (input == od_tuners->ignore_nice) { /* nothing to do */
 		return count;
 	}
-	od_tuners.ignore_nice = input;
+	od_tuners->ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct od_cpu_dbs_info_s *dbs_info;
 		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
-						&dbs_info->cdbs.prev_cpu_wall);
-		if (od_tuners.ignore_nice)
+			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
+		if (od_tuners->ignore_nice)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
@@ -420,9 +436,10 @@
 	return count;
 }
 
-static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
-				    const char *buf, size_t count)
+static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -433,68 +450,179 @@
 	if (input > 1000)
 		input = 1000;
 
-	od_tuners.powersave_bias = input;
+	od_tuners->powersave_bias = input;
 	ondemand_powersave_bias_init();
 	return count;
 }
 
-show_one(od, sampling_rate, sampling_rate);
-show_one(od, io_is_busy, io_is_busy);
-show_one(od, up_threshold, up_threshold);
-show_one(od, sampling_down_factor, sampling_down_factor);
-show_one(od, ignore_nice_load, ignore_nice);
-show_one(od, powersave_bias, powersave_bias);
+show_store_one(od, sampling_rate);
+show_store_one(od, io_is_busy);
+show_store_one(od, up_threshold);
+show_store_one(od, sampling_down_factor);
+show_store_one(od, ignore_nice);
+show_store_one(od, powersave_bias);
+declare_show_sampling_rate_min(od);
 
-define_one_global_rw(sampling_rate);
-define_one_global_rw(io_is_busy);
-define_one_global_rw(up_threshold);
-define_one_global_rw(sampling_down_factor);
-define_one_global_rw(ignore_nice_load);
-define_one_global_rw(powersave_bias);
-define_one_global_ro(sampling_rate_min);
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(powersave_bias);
+gov_sys_pol_attr_ro(sampling_rate_min);
 
-static struct attribute *dbs_attributes[] = {
-	&sampling_rate_min.attr,
-	&sampling_rate.attr,
-	&up_threshold.attr,
-	&sampling_down_factor.attr,
-	&ignore_nice_load.attr,
-	&powersave_bias.attr,
-	&io_is_busy.attr,
+static struct attribute *dbs_attributes_gov_sys[] = {
+	&sampling_rate_min_gov_sys.attr,
+	&sampling_rate_gov_sys.attr,
+	&up_threshold_gov_sys.attr,
+	&sampling_down_factor_gov_sys.attr,
+	&ignore_nice_gov_sys.attr,
+	&powersave_bias_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
 	NULL
 };
 
-static struct attribute_group od_attr_group = {
-	.attrs = dbs_attributes,
+static struct attribute_group od_attr_group_gov_sys = {
+	.attrs = dbs_attributes_gov_sys,
+	.name = "ondemand",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+	&sampling_rate_min_gov_pol.attr,
+	&sampling_rate_gov_pol.attr,
+	&up_threshold_gov_pol.attr,
+	&sampling_down_factor_gov_pol.attr,
+	&ignore_nice_gov_pol.attr,
+	&powersave_bias_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	NULL
+};
+
+static struct attribute_group od_attr_group_gov_pol = {
+	.attrs = dbs_attributes_gov_pol,
 	.name = "ondemand",
 };
 
 /************************** sysfs end ************************/
 
+static int od_init(struct dbs_data *dbs_data)
+{
+	struct od_dbs_tuners *tuners;
+	u64 idle_time;
+	int cpu;
+
+	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+	if (!tuners) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	cpu = get_cpu();
+	idle_time = get_cpu_idle_time_us(cpu, NULL);
+	put_cpu();
+	if (idle_time != -1ULL) {
+		/* Idle micro accounting is supported. Use finer thresholds */
+		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
+			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+		/*
+		 * In nohz/micro accounting case we set the minimum frequency
+		 * not depending on HZ, but fixed (very low). The deferred
+		 * timer might skip some samples if idle/sleeping as needed.
+		 */
+		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+	} else {
+		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
+			DEF_FREQUENCY_DOWN_DIFFERENTIAL;
+
+		/* For correct statistics, we need 10 ticks for each measure */
+		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+			jiffies_to_usecs(10);
+	}
+
+	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	tuners->ignore_nice = 0;
+	tuners->powersave_bias = 0;
+	tuners->io_is_busy = should_io_be_busy();
+
+	dbs_data->tuners = tuners;
+	pr_info("%s: tuners %p\n", __func__, tuners);
+	mutex_init(&dbs_data->mutex);
+	return 0;
+}
+
+static void od_exit(struct dbs_data *dbs_data)
+{
+	kfree(dbs_data->tuners);
+}
+
 define_get_cpu_dbs_routines(od_cpu_dbs_info);
 
 static struct od_ops od_ops = {
-	.io_busy = should_io_be_busy,
 	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
-	.powersave_bias_target = powersave_bias_target,
+	.powersave_bias_target = generic_powersave_bias_target,
 	.freq_increase = dbs_freq_increase,
 };
 
-static struct dbs_data od_dbs_data = {
+static struct common_dbs_data od_dbs_cdata = {
 	.governor = GOV_ONDEMAND,
-	.attr_group = &od_attr_group,
-	.tuners = &od_tuners,
+	.attr_group_gov_sys = &od_attr_group_gov_sys,
+	.attr_group_gov_pol = &od_attr_group_gov_pol,
 	.get_cpu_cdbs = get_cpu_cdbs,
 	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
 	.gov_dbs_timer = od_dbs_timer,
 	.gov_check_cpu = od_check_cpu,
 	.gov_ops = &od_ops,
+	.init = od_init,
+	.exit = od_exit,
 };
 
+static void od_set_powersave_bias(unsigned int powersave_bias)
+{
+	struct cpufreq_policy *policy;
+	struct dbs_data *dbs_data;
+	struct od_dbs_tuners *od_tuners;
+	unsigned int cpu;
+	cpumask_t done;
+
+	cpumask_clear(&done);
+
+	get_online_cpus();
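+	/* Update each policy only once; 'done' tracks CPUs already covered */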
+	for_each_online_cpu(cpu) {
+		if (cpumask_test_cpu(cpu, &done))
+			continue;
+
+		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
+		dbs_data = policy->governor_data;
+		od_tuners = dbs_data->tuners;
+		od_tuners->powersave_bias = powersave_bias;
+
+		cpumask_or(&done, &done, policy->cpus);
+	}
+	put_online_cpus();
+}
+
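+/*
+ * Let another kernel module override how the powersave bias target
+ * frequency is computed and apply the given bias to every online policy.
+ */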
+void od_register_powersave_bias_handler(unsigned int (*f)
+		(struct cpufreq_policy *, unsigned int, unsigned int),
+		unsigned int powersave_bias)
+{
+	od_ops.powersave_bias_target = f;
+	od_set_powersave_bias(powersave_bias);
+}
+EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
+
+void od_unregister_powersave_bias_handler(void)
+{
+	od_ops.powersave_bias_target = generic_powersave_bias_target;
+	od_set_powersave_bias(0);
+}
+EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
+
 static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		unsigned int event)
 {
-	return cpufreq_governor_dbs(&od_dbs_data, policy, event);
+	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
 }
 
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
@@ -509,29 +637,6 @@
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	u64 idle_time;
-	int cpu = get_cpu();
-
-	mutex_init(&od_dbs_data.mutex);
-	idle_time = get_cpu_idle_time_us(cpu, NULL);
-	put_cpu();
-	if (idle_time != -1ULL) {
-		/* Idle micro accounting is supported. Use finer thresholds */
-		od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-		od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-					     MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
-		/*
-		 * In nohz/micro accounting case we set the minimum frequency
-		 * not depending on HZ, but fixed (very low). The deferred
-		 * timer might skip some samples if idle/sleeping as needed.
-		*/
-		od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
-	} else {
-		/* For correct statistics, we need 10 ticks for each measure */
-		od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
-			jiffies_to_usecs(10);
-	}
-
 	return cpufreq_register_governor(&cpufreq_gov_ondemand);
 }
 
diff --git a/arch/cris/arch-v32/mach-a3/cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
similarity index 88%
rename from arch/cris/arch-v32/mach-a3/cpufreq.c
rename to drivers/cpufreq/cris-artpec3-cpufreq.c
index ee391ec..ee142c4 100644
--- a/arch/cris/arch-v32/mach-a3/cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -27,23 +27,17 @@
 	return clk_ctrl.pll ? 200000 : 6000;
 }
 
-static void cris_freq_set_cpu_state(unsigned int state)
+static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
+		unsigned int state)
 {
-	int i = 0;
 	struct cpufreq_freqs freqs;
 	reg_clkgen_rw_clk_ctrl clk_ctrl;
 	clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
 
-#ifdef CONFIG_SMP
-	for_each_present_cpu(i)
-#endif
-	{
-		freqs.old = cris_freq_get_cpu_frequency(i);
-		freqs.new = cris_freq_table[state].frequency;
-		freqs.cpu = i;
-	}
+	freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
+	freqs.new = cris_freq_table[state].frequency;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	local_irq_disable();
 
@@ -57,7 +51,7 @@
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
 static int cris_freq_verify(struct cpufreq_policy *policy)
@@ -75,7 +69,7 @@
 			target_freq, relation, &newstate))
 		return -EINVAL;
 
-	cris_freq_set_cpu_state(newstate);
+	cris_freq_set_cpu_state(policy, newstate);
 
 	return 0;
 }
diff --git a/arch/cris/arch-v32/mach-fs/cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
similarity index 89%
rename from arch/cris/arch-v32/mach-fs/cpufreq.c
rename to drivers/cpufreq/cris-etraxfs-cpufreq.c
index d92cf70..1295223 100644
--- a/arch/cris/arch-v32/mach-fs/cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -27,20 +27,17 @@
 	return clk_ctrl.pll ? 200000 : 6000;
 }
 
-static void cris_freq_set_cpu_state(unsigned int state)
+static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
+		unsigned int state)
 {
-	int i;
 	struct cpufreq_freqs freqs;
 	reg_config_rw_clk_ctrl clk_ctrl;
 	clk_ctrl = REG_RD(config, regi_config, rw_clk_ctrl);
 
-	for_each_possible_cpu(i) {
-		freqs.old = cris_freq_get_cpu_frequency(i);
-		freqs.new = cris_freq_table[state].frequency;
-		freqs.cpu = i;
-	}
+	freqs.old = cris_freq_get_cpu_frequency(policy->cpu);
+	freqs.new = cris_freq_table[state].frequency;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	local_irq_disable();
 
@@ -54,7 +51,7 @@
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
 static int cris_freq_verify(struct cpufreq_policy *policy)
@@ -71,7 +68,7 @@
 	    (policy, cris_freq_table, target_freq, relation, &newstate))
 		return -EINVAL;
 
-	cris_freq_set_cpu_state(newstate);
+	cris_freq_set_cpu_state(policy, newstate);
 
 	return 0;
 }
diff --git a/arch/arm/mach-davinci/cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
similarity index 85%
rename from arch/arm/mach-davinci/cpufreq.c
rename to drivers/cpufreq/davinci-cpufreq.c
index 4729eaa..c33c76c 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -30,8 +30,6 @@
 #include <mach/cpufreq.h>
 #include <mach/common.h>
 
-#include "clock.h"
-
 struct davinci_cpufreq {
 	struct device *dev;
 	struct clk *armclk;
@@ -79,18 +77,8 @@
 	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
 	struct clk *armclk = cpufreq.armclk;
 
-	/*
-	 * Ensure desired rate is within allowed range.  Some govenors
-	 * (ondemand) will just pass target_freq=0 to get the minimum.
-	 */
-	if (target_freq < policy->cpuinfo.min_freq)
-		target_freq = policy->cpuinfo.min_freq;
-	if (target_freq > policy->cpuinfo.max_freq)
-		target_freq = policy->cpuinfo.max_freq;
-
 	freqs.old = davinci_getspeed(0);
 	freqs.new = clk_round_rate(armclk, target_freq * 1000) / 1000;
-	freqs.cpu = 0;
 
 	if (freqs.old == freqs.new)
 		return ret;
@@ -102,7 +90,7 @@
 	if (ret)
 		return -EINVAL;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* if moving to higher frequency, up the voltage beforehand */
 	if (pdata->set_voltage && freqs.new > freqs.old) {
@@ -126,7 +114,7 @@
 		pdata->set_voltage(idx);
 
 out:
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return ret;
 }
@@ -147,21 +135,16 @@
 			return result;
 	}
 
-	policy->cur = policy->min = policy->max = davinci_getspeed(0);
+	policy->cur = davinci_getspeed(0);
 
-	if (freq_table) {
-		result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-		if (!result)
-			cpufreq_frequency_table_get_attr(freq_table,
-							policy->cpu);
-	} else {
-		policy->cpuinfo.min_freq = policy->min;
-		policy->cpuinfo.max_freq = policy->max;
+	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+	if (result) {
+		pr_err("%s: cpufreq_frequency_table_cpuinfo() failed\n",
+				__func__);
+		return result;
 	}
 
-	policy->min = policy->cpuinfo.min_freq;
-	policy->max = policy->cpuinfo.max_freq;
-	policy->cur = davinci_getspeed(0);
+	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
 
 	/*
 	 * Time measurement across the target() function yields ~1500-1800us
diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c
index 72f0c3e..6ec6539 100644
--- a/drivers/cpufreq/dbx500-cpufreq.c
+++ b/drivers/cpufreq/dbx500-cpufreq.c
@@ -37,12 +37,6 @@
 	unsigned int idx;
 	int ret;
 
-	/* scale the target frequency to one of the extremes supported */
-	if (target_freq < policy->cpuinfo.min_freq)
-		target_freq = policy->cpuinfo.min_freq;
-	if (target_freq > policy->cpuinfo.max_freq)
-		target_freq = policy->cpuinfo.max_freq;
-
 	/* Lookup the next frequency */
 	if (cpufreq_frequency_table_target(policy, freq_table, target_freq,
 					relation, &idx))
@@ -55,8 +49,7 @@
 		return 0;
 
 	/* pre-change notification */
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* update armss clk frequency */
 	ret = clk_set_rate(armss_clk, freqs.new * 1000);
@@ -68,8 +61,7 @@
 	}
 
 	/* post change notification */
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
@@ -79,15 +71,15 @@
 	int i = 0;
 	unsigned long freq = clk_get_rate(armss_clk) / 1000;
 
-	while (freq_table[i].frequency != CPUFREQ_TABLE_END) {
-		if (freq <= freq_table[i].frequency)
+	/* The value is rounded to the closest frequency in the table. */
+	while (freq_table[i + 1].frequency != CPUFREQ_TABLE_END) {
+		if (freq < freq_table[i].frequency +
+		   (freq_table[i + 1].frequency - freq_table[i].frequency) / 2)
 			return freq_table[i].frequency;
 		i++;
 	}
 
-	/* We could not find a corresponding frequency. */
-	pr_err("dbx500-cpufreq: Failed to find cpufreq speed\n");
-	return 0;
+	return freq_table[i].frequency;
 }
 
 static int __cpuinit dbx500_cpufreq_init(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 3fffbe6..37380fb 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -104,7 +104,7 @@
 }
 
 static int eps_set_state(struct eps_cpu_data *centaur,
-			 unsigned int cpu,
+			 struct cpufreq_policy *policy,
 			 u32 dest_state)
 {
 	struct cpufreq_freqs freqs;
@@ -112,10 +112,9 @@
 	int err = 0;
 	int i;
 
-	freqs.old = eps_get(cpu);
+	freqs.old = eps_get(policy->cpu);
 	freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff);
-	freqs.cpu = cpu;
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Wait while CPU is busy */
 	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
@@ -162,7 +161,7 @@
 		current_multiplier);
 	}
 #endif
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	return err;
 }
 
@@ -190,7 +189,7 @@
 
 	/* Make frequency transition */
 	dest_state = centaur->freq_table[newstate].index & 0xffff;
-	ret = eps_set_state(centaur, cpu, dest_state);
+	ret = eps_set_state(centaur, policy, dest_state);
 	if (ret)
 		printk(KERN_ERR "eps: Timeout!\n");
 	return ret;
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 960671f..658d860 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -117,15 +117,15 @@
  *	There is no return value.
  */
 
-static void elanfreq_set_cpu_state(unsigned int state)
+static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
+		unsigned int state)
 {
 	struct cpufreq_freqs    freqs;
 
 	freqs.old = elanfreq_get_cpu_frequency(0);
 	freqs.new = elan_multiplier[state].clock;
-	freqs.cpu = 0; /* elanfreq.c is UP only driver */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
 			elan_multiplier[state].clock);
@@ -161,7 +161,7 @@
 	udelay(10000);
 	local_irq_enable();
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
 
@@ -188,7 +188,7 @@
 				target_freq, relation, &newstate))
 		return -EINVAL;
 
-	elanfreq_set_cpu_state(newstate);
+	elanfreq_set_cpu_state(policy, newstate);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 78057a3..475b4f6 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -70,7 +70,6 @@
 
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
-	freqs.cpu = policy->cpu;
 
 	if (freqs.new == freqs.old)
 		goto out;
@@ -105,8 +104,7 @@
 	}
 	arm_volt = volt_table[index];
 
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* When the new frequency is higher than current frequency */
 	if ((freqs.new > freqs.old) && !safe_arm_volt) {
@@ -131,8 +129,7 @@
 
 	exynos_info->set_freq(old_index, index);
 
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	/* When the new frequency is lower than current frequency */
 	if ((freqs.new < freqs.old) ||
@@ -297,7 +294,7 @@
 	else if (soc_is_exynos5250())
 		ret = exynos5250_cpufreq_init(exynos_info);
 	else
-		pr_err("%s: CPU type not found\n", __func__);
+		return 0;
 
 	if (ret)
 		goto err_vdd_arm;
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
new file mode 100644
index 0000000..0c74018
--- /dev/null
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Amit Daniel Kachhap <amit.daniel@samsung.com>
+ *
+ * EXYNOS5440 - CPU frequency scaling support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/opp.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Register definitions */
+#define XMU_DVFS_CTRL		0x0060
+#define XMU_PMU_P0_7		0x0064
+#define XMU_C0_3_PSTATE		0x0090
+#define XMU_P_LIMIT		0x00a0
+#define XMU_P_STATUS		0x00a4
+#define XMU_PMUEVTEN		0x00d0
+#define XMU_PMUIRQEN		0x00d4
+#define XMU_PMUIRQ		0x00d8
+
+/* PMU mask and shift definitions */
+#define P_VALUE_MASK		0x7
+
+#define XMU_DVFS_CTRL_EN_SHIFT	0
+
+#define P0_7_CPUCLKDEV_SHIFT	21
+#define P0_7_CPUCLKDEV_MASK	0x7
+#define P0_7_ATBCLKDEV_SHIFT	18
+#define P0_7_ATBCLKDEV_MASK	0x7
+#define P0_7_CSCLKDEV_SHIFT	15
+#define P0_7_CSCLKDEV_MASK	0x7
+#define P0_7_CPUEMA_SHIFT	28
+#define P0_7_CPUEMA_MASK	0xf
+#define P0_7_L2EMA_SHIFT	24
+#define P0_7_L2EMA_MASK		0xf
+#define P0_7_VDD_SHIFT		8
+#define P0_7_VDD_MASK		0x7f
+#define P0_7_FREQ_SHIFT		0
+#define P0_7_FREQ_MASK		0xff
+
+#define C0_3_PSTATE_VALID_SHIFT	8
+#define C0_3_PSTATE_CURR_SHIFT	4
+#define C0_3_PSTATE_NEW_SHIFT	0
+
+#define PSTATE_CHANGED_EVTEN_SHIFT	0
+
+#define PSTATE_CHANGED_IRQEN_SHIFT	0
+
+#define PSTATE_CHANGED_SHIFT		0
+
+/* some constant values for clock divider calculation */
+#define CPU_DIV_FREQ_MAX	500
+#define CPU_DBG_FREQ_MAX	375
+#define CPU_ATB_FREQ_MAX	500
+
+#define PMIC_LOW_VOLT		0x30
+#define PMIC_HIGH_VOLT		0x28
+
+#define CPUEMA_HIGH		0x2
+#define CPUEMA_MID		0x4
+#define CPUEMA_LOW		0x7
+
+#define L2EMA_HIGH		0x1
+#define L2EMA_MID		0x3
+#define L2EMA_LOW		0x4
+
+#define DIV_TAB_MAX	2
+/* frequency unit is 20MHZ */
+#define FREQ_UNIT	20
+#define MAX_VOLTAGE	1550000 /* In microvolt */
+#define VOLTAGE_STEP	12500	/* In microvolt */
+
+#define CPUFREQ_NAME		"exynos5440_dvfs"
+#define DEF_TRANS_LATENCY	100000
+
+enum cpufreq_level_index {
+	L0, L1, L2, L3, L4,
+	L5, L6, L7, L8, L9,
+};
+#define CPUFREQ_LEVEL_END	(L7 + 1)
+
+struct exynos_dvfs_data {
+	void __iomem *base;
+	struct resource *mem;
+	int irq;
+	struct clk *cpu_clk;
+	unsigned int cur_frequency;
+	unsigned int latency;
+	struct cpufreq_frequency_table *freq_table;
+	unsigned int freq_count;
+	struct device *dev;
+	bool dvfs_enabled;
+	struct work_struct irq_work;
+};
+
+static struct exynos_dvfs_data *dvfs_info;
+static DEFINE_MUTEX(cpufreq_lock);
+static struct cpufreq_freqs freqs;
+
+static int init_div_table(void)
+{
+	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
+	unsigned int tmp, clk_div, ema_div, freq, volt_id;
+	int i = 0;
+	struct opp *opp;
+
+	rcu_read_lock();
+	for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
+
+		opp = opp_find_freq_exact(dvfs_info->dev,
+					freq_tbl[i].frequency * 1000, true);
+		if (IS_ERR(opp)) {
+			rcu_read_unlock();
+			dev_err(dvfs_info->dev,
+				"failed to find valid OPP for %u KHZ\n",
+				freq_tbl[i].frequency);
+			return PTR_ERR(opp);
+		}
+
+		freq = freq_tbl[i].frequency / 1000; /* In MHZ */
+		clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
+					<< P0_7_CPUCLKDEV_SHIFT;
+		clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
+					<< P0_7_ATBCLKDEV_SHIFT;
+		clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
+					<< P0_7_CSCLKDEV_SHIFT;
+
+		/* Calculate EMA */
+		volt_id = opp_get_voltage(opp);
+		volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
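+		/* A smaller volt_id means a higher supply voltage */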
+		if (volt_id < PMIC_HIGH_VOLT) {
+			ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
+				(L2EMA_HIGH << P0_7_L2EMA_SHIFT);
+		} else if (volt_id > PMIC_LOW_VOLT) {
+			ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
+				(L2EMA_LOW << P0_7_L2EMA_SHIFT);
+		} else {
+			ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
+				(L2EMA_MID << P0_7_L2EMA_SHIFT);
+		}
+
+		tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
+			| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
+
+		__raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
+static void exynos_enable_dvfs(void)
+{
+	unsigned int tmp, i, cpu;
+	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+	/* Disable DVFS */
+	__raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
+
+	/* Enable PSTATE Change Event */
+	tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
+	tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
+	__raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
+
+	/* Enable PSTATE Change IRQ */
+	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
+	tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
+	__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
+
+	/* Set initial performance index */
+	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+		if (freq_table[i].frequency == dvfs_info->cur_frequency)
+			break;
+
+	if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
+		dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
+		/* Assign the highest frequency */
+		i = 0;
+		dvfs_info->cur_frequency = freq_table[i].frequency;
+	}
+
+	dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %u kHz\n",
+						dvfs_info->cur_frequency);
+
+	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
+		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
+		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
+		tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
+		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
+	}
+
+	/* Enable DVFS */
+	__raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
+				dvfs_info->base + XMU_DVFS_CTRL);
+}
+
+static int exynos_verify_speed(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy,
+					      dvfs_info->freq_table);
+}
+
+static unsigned int exynos_getspeed(unsigned int cpu)
+{
+	return dvfs_info->cur_frequency;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+			  unsigned int target_freq,
+			  unsigned int relation)
+{
+	unsigned int index, tmp;
+	int ret = 0, i;
+	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+
+	mutex_lock(&cpufreq_lock);
+
+	ret = cpufreq_frequency_table_target(policy, freq_table,
+					   target_freq, relation, &index);
+	if (ret)
+		goto out;
+
+	freqs.old = dvfs_info->cur_frequency;
+	freqs.new = freq_table[index].frequency;
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+	/* Set the target frequency in all C0_3_PSTATE register */
+	for_each_cpu(i, policy->cpus) {
+		tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
+		tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
+		tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
+
+		__raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
+	}
+out:
+	mutex_unlock(&cpufreq_lock);
+	return ret;
+}
+
+static void exynos_cpufreq_work(struct work_struct *work)
+{
+	unsigned int cur_pstate, index;
+	struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+	struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
+
+	/* Ensure we can access cpufreq structures */
+	if (unlikely(dvfs_info->dvfs_enabled == false))
+		goto skip_work;
+
+	mutex_lock(&cpufreq_lock);
+	freqs.old = dvfs_info->cur_frequency;
+
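+	/* Prefer the valid current P-state; otherwise use the pending one */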
+	cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
+	if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
+		index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
+	else
+		index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
+
+	if (likely(index < dvfs_info->freq_count)) {
+		freqs.new = freq_table[index].frequency;
+		dvfs_info->cur_frequency = freqs.new;
+	} else {
+		dev_crit(dvfs_info->dev, "New frequency out of range\n");
+		freqs.new = dvfs_info->cur_frequency;
+	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+	cpufreq_cpu_put(policy);
+	mutex_unlock(&cpufreq_lock);
+skip_work:
+	enable_irq(dvfs_info->irq);
+}
+
+static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
+{
+	unsigned int tmp;
+
+	tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
+	if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
+		__raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
+		disable_irq_nosync(irq);
+		schedule_work(&dvfs_info->irq_work);
+	}
+	return IRQ_HANDLED;
+}
+
+static void exynos_sort_descend_freq_table(void)
+{
+	struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
+	int i = 0, index;
+	unsigned int tmp_freq;
+	/*
+	 * Exynos5440 clock controller state logic expects the cpufreq table to
+	 * be in descending order. But the OPP library constructs the table in
+	 * ascending order. So to make the table descending we just need to
+	 * swap the i element with the N - i element.
+	 */
+	for (i = 0; i < dvfs_info->freq_count / 2; i++) {
+		index = dvfs_info->freq_count - i - 1;
+		tmp_freq = freq_tbl[i].frequency;
+		freq_tbl[i].frequency = freq_tbl[index].frequency;
+		freq_tbl[index].frequency = tmp_freq;
+	}
+}
+
+static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
+	if (ret) {
+		dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
+		return ret;
+	}
+
+	policy->cur = dvfs_info->cur_frequency;
+	policy->cpuinfo.transition_latency = dvfs_info->latency;
+	cpumask_setall(policy->cpus);
+
+	cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
+
+	return 0;
+}
+
+static struct cpufreq_driver exynos_driver = {
+	.flags		= CPUFREQ_STICKY,
+	.verify		= exynos_verify_speed,
+	.target		= exynos_target,
+	.get		= exynos_getspeed,
+	.init		= exynos_cpufreq_cpu_init,
+	.name		= CPUFREQ_NAME,
+};
+
+static const struct of_device_id exynos_cpufreq_match[] = {
+	{
+		.compatible = "samsung,exynos5440-cpufreq",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
+
+static int exynos_cpufreq_probe(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct device_node *np;
+	struct resource res;
+
+	np = pdev->dev.of_node;
+	if (!np)
+		return -ENODEV;
+
+	dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
+	if (!dvfs_info) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
+
+	dvfs_info->dev = &pdev->dev;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret)
+		goto err_put_node;
+
+	dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
+	if (IS_ERR(dvfs_info->base)) {
+		ret = PTR_ERR(dvfs_info->base);
+		goto err_put_node;
+	}
+
+	dvfs_info->irq = irq_of_parse_and_map(np, 0);
+	if (!dvfs_info->irq) {
+		dev_err(dvfs_info->dev, "No cpufreq irq found\n");
+		ret = -ENODEV;
+		goto err_put_node;
+	}
+
+	ret = of_init_opp_table(dvfs_info->dev);
+	if (ret) {
+		dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
+		goto err_put_node;
+	}
+
+	ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+	if (ret) {
+		dev_err(dvfs_info->dev,
+			"failed to init cpufreq table: %d\n", ret);
+		goto err_put_node;
+	}
+	dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+	exynos_sort_descend_freq_table();
+
+	if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
+		dvfs_info->latency = DEF_TRANS_LATENCY;
+
+	dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
+	if (IS_ERR(dvfs_info->cpu_clk)) {
+		dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
+		ret = PTR_ERR(dvfs_info->cpu_clk);
+		goto err_free_table;
+	}
+
+	dvfs_info->cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
+	if (!dvfs_info->cur_frequency) {
+		dev_err(dvfs_info->dev, "Failed to get clock rate\n");
+		ret = -EINVAL;
+		goto err_free_table;
+	}
+	dvfs_info->cur_frequency /= 1000;
+
+	INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
+	ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
+				exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
+				CPUFREQ_NAME, dvfs_info);
+	if (ret) {
+		dev_err(dvfs_info->dev, "Failed to register IRQ\n");
+		goto err_free_table;
+	}
+
+	ret = init_div_table();
+	if (ret) {
+		dev_err(dvfs_info->dev, "Failed to initialise div table\n");
+		goto err_free_table;
+	}
+
+	exynos_enable_dvfs();
+	ret = cpufreq_register_driver(&exynos_driver);
+	if (ret) {
+		dev_err(dvfs_info->dev,
+			"%s: failed to register cpufreq driver\n", __func__);
+		goto err_free_table;
+	}
+
+	of_node_put(np);
+	dvfs_info->dvfs_enabled = true;
+	return 0;
+
+err_free_table:
+	opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+err_put_node:
+	of_node_put(np);
+	dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
+	return ret;
+}
+
+static int exynos_cpufreq_remove(struct platform_device *pdev)
+{
+	cpufreq_unregister_driver(&exynos_driver);
+	opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+	return 0;
+}
+
+static struct platform_driver exynos_cpufreq_platdrv = {
+	.driver = {
+		.name	= "exynos5440-cpufreq",
+		.owner	= THIS_MODULE,
+		.of_match_table = exynos_cpufreq_match,
+	},
+	.probe		= exynos_cpufreq_probe,
+	.remove		= exynos_cpufreq_remove,
+};
+module_platform_driver(exynos_cpufreq_platdrv);
+
+MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
+MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 456bee0..3dfc99b 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -251,14 +251,13 @@
  * set cpu speed in khz.
  **/
 
-static void gx_set_cpuspeed(unsigned int khz)
+static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
 {
 	u8 suscfg, pmer1;
 	unsigned int new_khz;
 	unsigned long flags;
 	struct cpufreq_freqs freqs;
 
-	freqs.cpu = 0;
 	freqs.old = gx_get_cpuspeed(0);
 
 	new_khz = gx_validate_speed(khz, &gx_params->on_duration,
@@ -266,11 +265,9 @@
 
 	freqs.new = new_khz;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	local_irq_save(flags);
 
-
-
 	if (new_khz != stock_freq) {
 		/* if new khz == 100% of CPU speed, it is special case */
 		switch (gx_params->cs55x0->device) {
@@ -317,7 +314,7 @@
 
 	gx_params->pci_suscfg = suscfg;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
 		gx_params->on_duration * 32, gx_params->off_duration * 32);
@@ -397,7 +394,7 @@
 		tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2);
 	}
 
-	gx_set_cpuspeed(tmp_freq);
+	gx_set_cpuspeed(policy, tmp_freq);
 
 	return 0;
 }
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
similarity index 93%
rename from arch/ia64/kernel/cpufreq/acpi-cpufreq.c
rename to drivers/cpufreq/ia64-acpi-cpufreq.c
index f09b174..c0075db 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -1,5 +1,4 @@
 /*
- * arch/ia64/kernel/cpufreq/acpi-cpufreq.c
  * This file provides the ACPI based P-state support. This
  * module works with generic cpufreq infrastructure. Most of
  * the code is based on i386 version
@@ -137,7 +136,7 @@
 static int
 processor_set_freq (
 	struct cpufreq_acpi_io	*data,
-	unsigned int		cpu,
+	struct cpufreq_policy   *policy,
 	int			state)
 {
 	int			ret = 0;
@@ -149,8 +148,8 @@
 	pr_debug("processor_set_freq\n");
 
 	saved_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
-	if (smp_processor_id() != cpu) {
+	set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
+	if (smp_processor_id() != policy->cpu) {
 		retval = -EAGAIN;
 		goto migrate_end;
 	}
@@ -170,12 +169,11 @@
 		data->acpi_data.state, state);
 
 	/* cpufreq frequency struct */
-	cpufreq_freqs.cpu = cpu;
 	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
 	cpufreq_freqs.new = data->freq_table[state].frequency;
 
 	/* notify cpufreq */
-	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_PRECHANGE);
 
 	/*
 	 * First we write the target state's 'control' value to the
@@ -189,17 +187,20 @@
 	ret = processor_set_pstate(value);
 	if (ret) {
 		unsigned int tmp = cpufreq_freqs.new;
-		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+		cpufreq_notify_transition(policy, &cpufreq_freqs,
+				CPUFREQ_POSTCHANGE);
 		cpufreq_freqs.new = cpufreq_freqs.old;
 		cpufreq_freqs.old = tmp;
-		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
-		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+		cpufreq_notify_transition(policy, &cpufreq_freqs,
+				CPUFREQ_PRECHANGE);
+		cpufreq_notify_transition(policy, &cpufreq_freqs,
+				CPUFREQ_POSTCHANGE);
 		printk(KERN_WARNING "Transition failed with error %d\n", ret);
 		retval = -ENODEV;
 		goto migrate_end;
 	}
 
-	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &cpufreq_freqs, CPUFREQ_POSTCHANGE);
 
 	data->acpi_data.state = state;
 
@@ -240,7 +241,7 @@
 	if (result)
 		return (result);
 
-	result = processor_set_freq(data, policy->cpu, next_state);
+	result = processor_set_freq(data, policy, next_state);
 
 	return (result);
 }
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 54e336d..b78bc35 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -50,7 +50,7 @@
 	struct cpufreq_freqs freqs;
 	struct opp *opp;
 	unsigned long freq_hz, volt, volt_old;
-	unsigned int index, cpu;
+	unsigned int index;
 	int ret;
 
 	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
@@ -68,10 +68,7 @@
 	if (freqs.old == freqs.new)
 		return 0;
 
-	for_each_online_cpu(cpu) {
-		freqs.cpu = cpu;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	rcu_read_lock();
 	opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
@@ -166,10 +163,7 @@
 		}
 	}
 
-	for_each_online_cpu(cpu) {
-		freqs.cpu = cpu;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/arch/arm/mach-integrator/cpu.c b/drivers/cpufreq/integrator-cpufreq.c
similarity index 95%
rename from arch/arm/mach-integrator/cpu.c
rename to drivers/cpufreq/integrator-cpufreq.c
index 590c192..f7c99df 100644
--- a/arch/arm/mach-integrator/cpu.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/arm/mach-integrator/cpu.c
- *
  *  Copyright (C) 2001-2002 Deep Blue Solutions Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -123,14 +121,12 @@
 	vco = icst_hz_to_vco(&cclk_params, target_freq * 1000);
 	freqs.new = icst_hz(&cclk_params, vco) / 1000;
 
-	freqs.cpu = policy->cpu;
-
 	if (freqs.old == freqs.new) {
 		set_cpus_allowed(current, cpus_allowed);
 		return 0;
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	cm_osc = __raw_readl(CM_OSC);
 
@@ -151,7 +147,7 @@
 	 */
 	set_cpus_allowed(current, cpus_allowed);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6133ef5..cc3a8e6 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1,5 +1,5 @@
 /*
- * cpufreq_snb.c: Native P state management for Intel processors
+ * intel_pstate.c: Native P state management for Intel processors
  *
  * (C) Copyright 2012 Intel Corporation
  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
@@ -657,30 +657,27 @@
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
 	struct cpudata *cpu;
-	int min, max;
 
 	cpu = all_cpu_data[policy->cpu];
 
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
-	intel_pstate_get_min_max(cpu, &min, &max);
-
-	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
-	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
-
-	limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
-	limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
-	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
-
 	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
 		limits.min_perf_pct = 100;
 		limits.min_perf = int_tofp(1);
 		limits.max_perf_pct = 100;
 		limits.max_perf = int_tofp(1);
 		limits.no_turbo = 0;
+		return 0;
 	}
+	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
+	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
+	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
+
+	limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+	limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0, 100);
+	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	return 0;
 }
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index 0e83e3c..d36ea8d 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -55,7 +55,8 @@
 	return kirkwood_freq_table[0].frequency;
 }
 
-static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
+static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
+		unsigned int index)
 {
 	struct cpufreq_freqs freqs;
 	unsigned int state = kirkwood_freq_table[index].index;
@@ -63,9 +64,8 @@
 
 	freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
 	freqs.new = kirkwood_freq_table[index].frequency;
-	freqs.cpu = 0; /* Kirkwood is UP */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
 		kirkwood_freq_table[index].frequency);
@@ -99,7 +99,7 @@
 
 		local_irq_enable();
 	}
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
 static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
@@ -117,7 +117,7 @@
 				target_freq, relation, &index))
 		return -EINVAL;
 
-	kirkwood_cpufreq_set_cpu_state(index);
+	kirkwood_cpufreq_set_cpu_state(policy, index);
 
 	return 0;
 }
@@ -175,11 +175,9 @@
 		dev_err(&pdev->dev, "Cannot get memory resource\n");
 		return -ENODEV;
 	}
-	priv.base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!priv.base) {
-		dev_err(&pdev->dev, "Cannot ioremap\n");
-		return -EADDRNOTAVAIL;
-	}
+	priv.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv.base))
+		return PTR_ERR(priv.base);
 
 	np = of_find_node_by_path("/cpus/cpu@0");
 	if (!np)
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 1180d53..b448638 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,8 @@
  * Sets a new clock ratio.
  */
 
-static void longhaul_setstate(unsigned int table_index)
+static void longhaul_setstate(struct cpufreq_policy *policy,
+		unsigned int table_index)
 {
 	unsigned int mults_index;
 	int speed, mult;
@@ -267,9 +268,8 @@
 
 	freqs.old = calc_speed(longhaul_get_cpu_mult());
 	freqs.new = speed;
-	freqs.cpu = 0; /* longhaul.c is UP only driver */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
 			fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@
 		}
 	}
 	/* Report true CPU frequency */
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	if (!bm_timeout)
 		printk(KERN_INFO PFX "Warning: Timeout while waiting for "
@@ -648,7 +648,7 @@
 		return 0;
 
 	if (!can_scale_voltage)
-		longhaul_setstate(table_index);
+		longhaul_setstate(policy, table_index);
 	else {
 		/* On test system voltage transitions exceeding single
 		 * step up or down were turning motherboard off. Both
@@ -663,7 +663,7 @@
 		while (i != table_index) {
 			vid = (longhaul_table[i].index >> 8) & 0x1f;
 			if (vid != current_vid) {
-				longhaul_setstate(i);
+				longhaul_setstate(policy, i);
 				current_vid = vid;
 				msleep(200);
 			}
@@ -672,7 +672,7 @@
 			else
 				i--;
 		}
-		longhaul_setstate(table_index);
+		longhaul_setstate(policy, table_index);
 	}
 	longhaul_index = table_index;
 	return 0;
@@ -998,15 +998,17 @@
 
 static void __exit longhaul_exit(void)
 {
+	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
 	int i;
 
 	for (i = 0; i < numscales; i++) {
 		if (mults[i] == maxmult) {
-			longhaul_setstate(i);
+			longhaul_setstate(policy, i);
 			break;
 		}
 	}
 
+	cpufreq_cpu_put(policy);
 	cpufreq_unregister_driver(&longhaul_driver);
 	kfree(longhaul_table);
 }
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
similarity index 95%
rename from arch/mips/kernel/cpufreq/loongson2_cpufreq.c
rename to drivers/cpufreq/loongson2_cpufreq.c
index 3237c52..8488957 100644
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -61,9 +61,6 @@
 	struct cpufreq_freqs freqs;
 	unsigned int freq;
 
-	if (!cpu_online(cpu))
-		return -ENODEV;
-
 	cpus_allowed = current->cpus_allowed;
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
@@ -80,7 +77,6 @@
 
 	pr_debug("cpufreq: requested frequency %u Hz\n", target_freq * 1000);
 
-	freqs.cpu = cpu;
 	freqs.old = loongson2_cpufreq_get(cpu);
 	freqs.new = freq;
 	freqs.flags = 0;
@@ -89,7 +85,7 @@
 		return 0;
 
 	/* notifiers */
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
@@ -97,7 +93,7 @@
 	clk_set_rate(cpuclk, freq);
 
 	/* notifiers */
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	pr_debug("cpufreq: set frequency %u kHz\n", freq);
 
@@ -110,9 +106,6 @@
 	unsigned long rate;
 	int ret;
 
-	if (!cpu_online(policy->cpu))
-		return -ENODEV;
-
 	cpuclk = clk_get(NULL, "cpu_clk");
 	if (IS_ERR(cpuclk)) {
 		printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index d4c4989..cdd6291 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -158,11 +158,10 @@
 
 	freqs.old = maple_cpu_freqs[maple_pmode_cur].frequency;
 	freqs.new = maple_cpu_freqs[newstate].frequency;
-	freqs.cpu = 0;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	rc = maple_scom_switch_freq(newstate);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	mutex_unlock(&maple_switch_mutex);
 
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 9128c07..0279d18 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -25,6 +25,7 @@
 #include <linux/opp.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
 #include <asm/smp_plat.h>
@@ -88,16 +89,12 @@
 	}
 
 	freqs.old = omap_getspeed(policy->cpu);
-	freqs.cpu = policy->cpu;
 
 	if (freqs.old == freqs.new && policy->cur == freqs.new)
 		return ret;
 
 	/* notifiers */
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	freq = freqs.new * 1000;
 	ret = clk_round_rate(mpu_clk, freq);
@@ -157,10 +154,7 @@
 
 done:
 	/* notifiers */
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return ret;
 }
@@ -184,7 +178,7 @@
 		goto fail_ck;
 	}
 
-	policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+	policy->cur = omap_getspeed(policy->cpu);
 
 	if (!freq_table)
 		result = opp_init_cpufreq_table(mpu_dev, &freq_table);
@@ -203,8 +197,6 @@
 
 	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
 
-	policy->min = policy->cpuinfo.min_freq;
-	policy->max = policy->cpuinfo.max_freq;
 	policy->cur = omap_getspeed(policy->cpu);
 
 	/*
@@ -252,7 +244,7 @@
 	.attr		= omap_cpufreq_attr,
 };
 
-static int __init omap_cpufreq_init(void)
+static int omap_cpufreq_probe(struct platform_device *pdev)
 {
 	mpu_dev = get_cpu_device(0);
 	if (!mpu_dev) {
@@ -280,12 +272,20 @@
 	return cpufreq_register_driver(&omap_driver);
 }
 
-static void __exit omap_cpufreq_exit(void)
+static int omap_cpufreq_remove(struct platform_device *pdev)
 {
-	cpufreq_unregister_driver(&omap_driver);
+	return cpufreq_unregister_driver(&omap_driver);
 }
 
+static struct platform_driver omap_cpufreq_platdrv = {
+	.driver = {
+		.name	= "omap-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= omap_cpufreq_probe,
+	.remove		= omap_cpufreq_remove,
+};
+module_platform_driver(omap_cpufreq_platdrv);
+
 MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
 MODULE_LICENSE("GPL");
-module_init(omap_cpufreq_init);
-module_exit(omap_cpufreq_exit);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 827629c9..421ef37 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -58,8 +58,7 @@
 {
 	u32 l, h;
 
-	if (!cpu_online(cpu) ||
-	    (newstate > DC_DISABLE) || (newstate == DC_RESV))
+	if ((newstate > DC_DISABLE) || (newstate == DC_RESV))
 		return -EINVAL;
 
 	rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);
@@ -125,10 +124,7 @@
 		return 0;
 
 	/* notifiers */
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* run on each logical CPU,
 	 * see section 13.15.3 of IA32 Intel Architecture Software
@@ -138,10 +134,7 @@
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 503996a..0de0008 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -215,8 +215,7 @@
 		(pcch_virt_addr + pcc_cpu_data->input_offset));
 
 	freqs.new = target_freq;
-	freqs.cpu = cpu;
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	input_buffer = 0x1 | (((target_freq * 100)
 			       / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -237,7 +236,7 @@
 	}
 	iowrite16(0, &pcch_hdr->status);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu);
 	spin_unlock(&pcc_lock);
 
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index af23e0b..ea0222a 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -68,7 +68,8 @@
  *
  *   Tries to change the PowerNow! multiplier
  */
-static void powernow_k6_set_state(unsigned int best_i)
+static void powernow_k6_set_state(struct cpufreq_policy *policy,
+		unsigned int best_i)
 {
 	unsigned long outvalue = 0, invalue = 0;
 	unsigned long msrval;
@@ -81,9 +82,8 @@
 
 	freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
 	freqs.new = busfreq * clock_ratio[best_i].index;
-	freqs.cpu = 0; /* powernow-k6.c is UP only driver */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* we now need to transform best_i to the BVC format, see AMD#23446 */
 
@@ -98,7 +98,7 @@
 	msrval = POWERNOW_IOPORT + 0x0;
 	wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return;
 }
@@ -136,7 +136,7 @@
 				target_freq, relation, &newstate))
 		return -EINVAL;
 
-	powernow_k6_set_state(newstate);
+	powernow_k6_set_state(policy, newstate);
 
 	return 0;
 }
@@ -182,7 +182,7 @@
 	unsigned int i;
 	for (i = 0; i < 8; i++) {
 		if (i == max_multiplier)
-			powernow_k6_set_state(i);
+			powernow_k6_set_state(policy, i);
 	}
 	cpufreq_frequency_table_put_attr(policy->cpu);
 	return 0;
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 334cc2f..53888da 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -248,7 +248,7 @@
 }
 
 
-static void change_speed(unsigned int index)
+static void change_speed(struct cpufreq_policy *policy, unsigned int index)
 {
 	u8 fid, vid;
 	struct cpufreq_freqs freqs;
@@ -263,15 +263,13 @@
 	fid = powernow_table[index].index & 0xFF;
 	vid = (powernow_table[index].index & 0xFF00) >> 8;
 
-	freqs.cpu = 0;
-
 	rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val);
 	cfid = fidvidstatus.bits.CFID;
 	freqs.old = fsb * fid_codes[cfid] / 10;
 
 	freqs.new = powernow_table[index].frequency;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Now do the magic poking into the MSRs.  */
 
@@ -292,7 +290,7 @@
 	if (have_a0 == 1)
 		local_irq_enable();
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 }
 
 
@@ -546,7 +544,7 @@
 				relation, &newstate))
 		return -EINVAL;
 
-	change_speed(newstate);
+	change_speed(policy, newstate);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index d13a136..b828efe 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -928,9 +928,10 @@
 static int transition_frequency_fidvid(struct powernow_k8_data *data,
 		unsigned int index)
 {
+	struct cpufreq_policy *policy;
 	u32 fid = 0;
 	u32 vid = 0;
-	int res, i;
+	int res;
 	struct cpufreq_freqs freqs;
 
 	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
@@ -959,10 +960,10 @@
 	freqs.old = find_khz_freq_from_fid(data->currfid);
 	freqs.new = find_khz_freq_from_fid(fid);
 
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	policy = cpufreq_cpu_get(smp_processor_id());
+	cpufreq_cpu_put(policy);
+
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	res = transition_fid_vid(data, fid, vid);
 	if (res)
@@ -970,10 +971,7 @@
 
 	freqs.new = find_khz_freq_from_fid(data->currfid);
 
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	return res;
 }
 
@@ -1104,9 +1102,6 @@
 	struct init_on_cpu init_on_cpu;
 	int rc;
 
-	if (!cpu_online(pol->cpu))
-		return -ENODEV;
-
 	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
 	if (rc)
 		return -ENODEV;
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
similarity index 96%
rename from arch/powerpc/platforms/cell/cbe_cpufreq.c
rename to drivers/cpufreq/ppc_cbe_cpufreq.c
index d4c39e3..e577a1d 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -27,7 +27,8 @@
 #include <asm/machdep.h>
 #include <asm/prom.h>
 #include <asm/cell-regs.h>
-#include "cbe_cpufreq.h"
+
+#include "ppc_cbe_cpufreq.h"
 
 static DEFINE_MUTEX(cbe_switch_mutex);
 
@@ -156,10 +157,9 @@
 
 	freqs.old = policy->cur;
 	freqs.new = cbe_freqs[cbe_pmode_new].frequency;
-	freqs.cpu = policy->cpu;
 
 	mutex_lock(&cbe_switch_mutex);
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	pr_debug("setting frequency for cpu %d to %d kHz, " \
 		 "1/%d of max frequency\n",
@@ -169,7 +169,7 @@
 
 	rc = set_pmode(policy->cpu, cbe_pmode_new);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	mutex_unlock(&cbe_switch_mutex);
 
 	return rc;
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
similarity index 82%
rename from arch/powerpc/platforms/cell/cbe_cpufreq.h
rename to drivers/cpufreq/ppc_cbe_cpufreq.h
index c1d86bf..b4c00a5 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.h
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -1,5 +1,5 @@
 /*
- * cbe_cpufreq.h
+ * ppc_cbe_cpufreq.h
  *
  * This file contains the definitions used by the cbe_cpufreq driver.
  *
@@ -17,7 +17,7 @@
 
 int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
 
-#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE)
+#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
 extern bool cbe_cpufreq_has_pmi;
 #else
 #define cbe_cpufreq_has_pmi (0)
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
similarity index 98%
rename from arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
rename to drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
index 20472e4..84d2f2c 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pervasive.c
@@ -30,7 +30,7 @@
 #include <asm/hw_irq.h>
 #include <asm/cell-regs.h>
 
-#include "cbe_cpufreq.h"
+#include "ppc_cbe_cpufreq.h"
 
 /* to write to MIC register */
 static u64 MIC_Slow_Fast_Timer_table[] = {
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
similarity index 98%
rename from arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
rename to drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 60a07a4..d29e8da 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -35,7 +35,7 @@
 #include <asm/time.h>
 #endif
 
-#include "cbe_cpufreq.h"
+#include "ppc_cbe_cpufreq.h"
 
 static u8 pmi_slow_mode_limit[MAX_CBE];
 
diff --git a/arch/arm/mach-pxa/cpufreq-pxa2xx.c b/drivers/cpufreq/pxa2xx-cpufreq.c
similarity index 97%
rename from arch/arm/mach-pxa/cpufreq-pxa2xx.c
rename to drivers/cpufreq/pxa2xx-cpufreq.c
index 6a7aeab..9e5bc8e 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa2xx.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/arm/mach-pxa/cpufreq-pxa2xx.c
- *
  *  Copyright (C) 2002,2003 Intrinsyc Software
  *
  * This program is free software; you can redistribute it and/or modify
@@ -223,10 +221,11 @@
 			*pxa_freqs = pxa255_turbo_freqs;
 			*freq_table = pxa255_turbo_freq_table;
 		}
-	}
-	if (cpu_is_pxa27x()) {
+	} else if (cpu_is_pxa27x()) {
 		*pxa_freqs = pxa27x_freqs;
 		*freq_table = pxa27x_freq_table;
+	} else {
+		BUG();
 	}
 }
 
@@ -311,7 +310,6 @@
 	new_freq_mem = pxa_freq_settings[idx].membus;
 	freqs.old = policy->cur;
 	freqs.new = new_freq_cpu;
-	freqs.cpu = policy->cpu;
 
 	if (freq_debug)
 		pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n",
@@ -327,7 +325,7 @@
 	 * you should add a notify client with any platform specific
 	 * Vcc changing capability
 	 */
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Calculate the next MDREFR.  If we're slowing down the SDRAM clock
 	 * we need to preset the smaller DRI before the change.	 If we're
@@ -382,7 +380,7 @@
 	 * you should add a notify client with any platform specific
 	 * SDRAM refresh timer adjustments
 	 */
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	/*
 	 * Even if voltage setting fails, we don't report it, as the frequency
diff --git a/arch/arm/mach-pxa/cpufreq-pxa3xx.c b/drivers/cpufreq/pxa3xx-cpufreq.c
similarity index 95%
rename from arch/arm/mach-pxa/cpufreq-pxa3xx.c
rename to drivers/cpufreq/pxa3xx-cpufreq.c
index b85b4ab..15d60f8 100644
--- a/arch/arm/mach-pxa/cpufreq-pxa3xx.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -1,6 +1,4 @@
 /*
- * linux/arch/arm/mach-pxa/cpufreq-pxa3xx.c
- *
  * Copyright (C) 2008 Marvell International Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -17,10 +15,9 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 
+#include <mach/generic.h>
 #include <mach/pxa3xx-regs.h>
 
-#include "generic.h"
-
 #define HSS_104M	(0)
 #define HSS_156M	(1)
 #define HSS_208M	(2)
@@ -184,7 +181,6 @@
 
 	freqs.old = policy->cur;
 	freqs.new = next->cpufreq_mhz * 1000;
-	freqs.cpu = policy->cpu;
 
 	pr_debug("CPU frequency from %d MHz to %d MHz%s\n",
 			freqs.old / 1000, freqs.new / 1000,
@@ -193,14 +189,14 @@
 	if (freqs.old == target_freq)
 		return 0;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	local_irq_save(flags);
 	__update_core_freq(next);
 	__update_bus_freq(next);
 	local_irq_restore(flags);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index bcc053b..4f1881e 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -256,7 +256,6 @@
 		goto out;
 	}
 
-	freqs.cpu = 0;
 	freqs.flags = 0;
 	freqs.old = s3c_freq->is_dvs ? FREQ_DVS
 				     : clk_get_rate(s3c_freq->armclk) / 1000;
@@ -274,7 +273,7 @@
 	if (!to_dvs && freqs.old == freqs.new)
 		goto out;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	if (to_dvs) {
 		pr_debug("cpufreq: enter dvs\n");
@@ -287,7 +286,7 @@
 		ret = s3c2416_cpufreq_set_armdiv(s3c_freq, freqs.new);
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 out:
 	mutex_unlock(&cpufreq_lock);
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 6f9490b..27cacb5 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -84,7 +84,6 @@
 	if (ret != 0)
 		return ret;
 
-	freqs.cpu = 0;
 	freqs.old = clk_get_rate(armclk) / 1000;
 	freqs.new = s3c64xx_freq_table[i].frequency;
 	freqs.flags = 0;
@@ -95,7 +94,7 @@
 
 	pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 #ifdef CONFIG_REGULATOR
 	if (vddarm && freqs.new > freqs.old) {
@@ -117,7 +116,7 @@
 		goto err;
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 #ifdef CONFIG_REGULATOR
 	if (vddarm && freqs.new < freqs.old) {
@@ -141,7 +140,7 @@
 	if (clk_set_rate(armclk, freqs.old * 1000) < 0)
 		pr_err("Failed to restore original clock rate\n");
 err:
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return ret;
 }
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index a484aae..5c77570 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -229,7 +229,6 @@
 	}
 
 	freqs.new = s5pv210_freq_table[index].frequency;
-	freqs.cpu = 0;
 
 	if (freqs.new == freqs.old)
 		goto exit;
@@ -256,7 +255,7 @@
 			goto exit;
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/* Check if there need to change PLL */
 	if ((index == L0) || (priv_index == L0))
@@ -468,7 +467,7 @@
 		}
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	if (freqs.new < freqs.old) {
 		regulator_set_voltage(int_regulator,
diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/drivers/cpufreq/sa1100-cpufreq.c
similarity index 97%
rename from arch/arm/mach-sa1100/cpu-sa1100.c
rename to drivers/cpufreq/sa1100-cpufreq.c
index e8f4d1e..cff18e8 100644
--- a/arch/arm/mach-sa1100/cpu-sa1100.c
+++ b/drivers/cpufreq/sa1100-cpufreq.c
@@ -91,10 +91,9 @@
 
 #include <asm/cputype.h>
 
+#include <mach/generic.h>
 #include <mach/hardware.h>
 
-#include "generic.h"
-
 struct sa1100_dram_regs {
 	int speed;
 	u32 mdcnfg;
@@ -201,9 +200,8 @@
 
 	freqs.old = cur;
 	freqs.new = sa11x0_ppcr_to_freq(new_ppcr);
-	freqs.cpu = 0;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	if (freqs.new > cur)
 		sa1100_update_dram_timings(cur, freqs.new);
@@ -213,7 +211,7 @@
 	if (freqs.new < cur)
 		sa1100_update_dram_timings(cur, freqs.new);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/drivers/cpufreq/sa1110-cpufreq.c
similarity index 97%
rename from arch/arm/mach-sa1100/cpu-sa1110.c
rename to drivers/cpufreq/sa1110-cpufreq.c
index 48c45b0..39c90b6 100644
--- a/arch/arm/mach-sa1100/cpu-sa1110.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -27,10 +27,9 @@
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
 
+#include <mach/generic.h>
 #include <mach/hardware.h>
 
-#include "generic.h"
-
 #undef DEBUG
 
 struct sdram_params {
@@ -258,7 +257,6 @@
 
 	freqs.old = sa11x0_getspeed(0);
 	freqs.new = sa11x0_ppcr_to_freq(ppcr);
-	freqs.cpu = 0;
 
 	sdram_calculate_timing(&sd, freqs.new, sdram);
 
@@ -279,7 +277,7 @@
 	sd.mdcas[2] = 0xaaaaaaaa;
 #endif
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	/*
 	 * The clock could be going away for some time.  Set the SDRAMs
@@ -327,7 +325,7 @@
 	 */
 	sdram_update_refresh(freqs.new, sdram);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index e42e073..f740b13 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -53,7 +53,8 @@
 	}
 }
 
-static void sc520_freq_set_cpu_state(unsigned int state)
+static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
+		unsigned int state)
 {
 
 	struct cpufreq_freqs	freqs;
@@ -61,9 +62,8 @@
 
 	freqs.old = sc520_freq_get_cpu_frequency(0);
 	freqs.new = sc520_freq_table[state].frequency;
-	freqs.cpu = 0; /* AMD Elan is UP */
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	pr_debug("attempting to set frequency to %i kHz\n",
 			sc520_freq_table[state].frequency);
@@ -75,7 +75,7 @@
 
 	local_irq_enable();
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
 static int sc520_freq_verify(struct cpufreq_policy *policy)
@@ -93,7 +93,7 @@
 				target_freq, relation, &newstate))
 		return -EINVAL;
 
-	sc520_freq_set_cpu_state(newstate);
+	sc520_freq_set_cpu_state(policy, newstate);
 
 	return 0;
 }
diff --git a/arch/sh/kernel/cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
similarity index 90%
rename from arch/sh/kernel/cpufreq.c
rename to drivers/cpufreq/sh-cpufreq.c
index e68b45b..73adb64 100644
--- a/arch/sh/kernel/cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -1,6 +1,4 @@
 /*
- * arch/sh/kernel/cpufreq.c
- *
  * cpufreq driver for the SuperH processors.
  *
  * Copyright (C) 2002 - 2012 Paul Mundt
@@ -51,9 +49,6 @@
 	struct device *dev;
 	long freq;
 
-	if (!cpu_online(cpu))
-		return -ENODEV;
-
 	cpus_allowed = current->cpus_allowed;
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
@@ -69,15 +64,14 @@
 
 	dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
 
-	freqs.cpu	= cpu;
 	freqs.old	= sh_cpufreq_get(cpu);
 	freqs.new	= (freq + 500) / 1000;
 	freqs.flags	= 0;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 	clk_set_rate(cpuclk, freq);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	dev_dbg(dev, "set frequency %lu Hz\n", freq);
 
@@ -112,9 +106,6 @@
 	struct cpufreq_frequency_table *freq_table;
 	struct device *dev;
 
-	if (!cpu_online(cpu))
-		return -ENODEV;
-
 	dev = get_cpu_device(cpu);
 
 	cpuclk = clk_get(dev, "cpu_clk");
@@ -123,7 +114,7 @@
 		return PTR_ERR(cpuclk);
 	}
 
-	policy->cur = policy->min = policy->max = sh_cpufreq_get(cpu);
+	policy->cur = sh_cpufreq_get(cpu);
 
 	freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
 	if (freq_table) {
@@ -136,15 +127,12 @@
 		dev_notice(dev, "no frequency table found, falling back "
 			   "to rate rounding.\n");
 
-		policy->cpuinfo.min_freq =
+		policy->min = policy->cpuinfo.min_freq =
 			(clk_round_rate(cpuclk, 1) + 500) / 1000;
-		policy->cpuinfo.max_freq =
+		policy->max = policy->cpuinfo.max_freq =
 			(clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 	}
 
-	policy->min = policy->cpuinfo.min_freq;
-	policy->max = policy->cpuinfo.max_freq;
-
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 
 	dev_info(dev, "CPU Frequencies - Minimum %u.%03u MHz, "
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
similarity index 95%
rename from arch/sparc/kernel/us2e_cpufreq.c
rename to drivers/cpufreq/sparc-us2e-cpufreq.c
index 489fc15..306ae46 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -234,9 +234,6 @@
 	cpumask_t cpus_allowed;
 	unsigned long clock_tick, estar;
 
-	if (!cpu_online(cpu))
-		return 0;
-
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
@@ -248,16 +245,15 @@
 	return clock_tick / estar_to_divisor(estar);
 }
 
-static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
+static void us2e_set_cpu_divider_index(struct cpufreq_policy *policy,
+		unsigned int index)
 {
+	unsigned int cpu = policy->cpu;
 	unsigned long new_bits, new_freq;
 	unsigned long clock_tick, divisor, old_divisor, estar;
 	cpumask_t cpus_allowed;
 	struct cpufreq_freqs freqs;
 
-	if (!cpu_online(cpu))
-		return;
-
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
@@ -272,14 +268,13 @@
 
 	freqs.old = clock_tick / old_divisor;
 	freqs.new = new_freq;
-	freqs.cpu = cpu;
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	if (old_divisor != divisor)
 		us2e_transition(estar, new_bits, clock_tick * 1000,
 				old_divisor, divisor);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 }
@@ -295,7 +290,7 @@
 					   target_freq, relation, &new_index))
 		return -EINVAL;
 
-	us2e_set_cpu_divider_index(policy->cpu, new_index);
+	us2e_set_cpu_divider_index(policy, new_index);
 
 	return 0;
 }
@@ -335,7 +330,7 @@
 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
 {
 	if (cpufreq_us2e_driver)
-		us2e_set_cpu_divider_index(policy->cpu, 0);
+		us2e_set_cpu_divider_index(policy, 0);
 
 	return 0;
 }
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
similarity index 93%
rename from arch/sparc/kernel/us3_cpufreq.c
rename to drivers/cpufreq/sparc-us3-cpufreq.c
index eb1624b..c71ee14 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -82,9 +82,6 @@
 	unsigned long reg;
 	unsigned int ret;
 
-	if (!cpu_online(cpu))
-		return 0;
-
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
@@ -96,15 +93,14 @@
 	return ret;
 }
 
-static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
+static void us3_set_cpu_divider_index(struct cpufreq_policy *policy,
+		unsigned int index)
 {
+	unsigned int cpu = policy->cpu;
 	unsigned long new_bits, new_freq, reg;
 	cpumask_t cpus_allowed;
 	struct cpufreq_freqs freqs;
 
-	if (!cpu_online(cpu))
-		return;
-
 	cpumask_copy(&cpus_allowed, tsk_cpus_allowed(current));
 	set_cpus_allowed_ptr(current, cpumask_of(cpu));
 
@@ -131,14 +127,13 @@
 
 	freqs.old = get_current_freq(cpu, reg);
 	freqs.new = new_freq;
-	freqs.cpu = cpu;
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	reg &= ~SAFARI_CFG_DIV_MASK;
 	reg |= new_bits;
 	write_safari_cfg(reg);
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 }
@@ -156,7 +151,7 @@
 					   &new_index))
 		return -EINVAL;
 
-	us3_set_cpu_divider_index(policy->cpu, new_index);
+	us3_set_cpu_divider_index(policy, new_index);
 
 	return 0;
 }
@@ -192,7 +187,7 @@
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
 	if (cpufreq_us3_driver)
-		us3_set_cpu_divider_index(policy->cpu, 0);
+		us3_set_cpu_divider_index(policy, 0);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 7e4d773..156829f 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -121,7 +121,6 @@
 				target_freq, relation, &index))
 		return -EINVAL;
 
-	freqs.cpu = policy->cpu;
 	freqs.old = spear_cpufreq_get(0);
 
 	newfreq = spear_cpufreq.freq_tbl[index].frequency * 1000;
@@ -158,8 +157,7 @@
 	freqs.new = newfreq / 1000;
 	freqs.new /= mult;
 
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	if (mult == 2)
 		ret = spear1340_set_cpu_rate(srcclk, newfreq);
@@ -172,8 +170,7 @@
 		freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
 	}
 
-	for_each_cpu(freqs.cpu, policy->cpus)
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	return ret;
 }
 
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 3a953d5..618e6f4 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -457,7 +457,7 @@
 	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
 	struct cpufreq_freqs	freqs;
 	int			retval = 0;
-	unsigned int		j, k, first_cpu, tmp;
+	unsigned int		j, first_cpu, tmp;
 	cpumask_var_t covered_cpus;
 
 	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
@@ -481,10 +481,6 @@
 	for_each_cpu(j, policy->cpus) {
 		int good_cpu;
 
-		/* cpufreq holds the hotplug lock, so we are safe here */
-		if (!cpu_online(j))
-			continue;
-
 		/*
 		 * Support for SMP systems.
 		 * Make sure we are running on CPU that wants to change freq
@@ -522,13 +518,8 @@
 			pr_debug("target=%dkHz old=%d new=%d msr=%04x\n",
 				target_freq, freqs.old, freqs.new, msr);
 
-			for_each_cpu(k, policy->cpus) {
-				if (!cpu_online(k))
-					continue;
-				freqs.cpu = k;
-				cpufreq_notify_transition(&freqs,
+			cpufreq_notify_transition(policy, &freqs,
 					CPUFREQ_PRECHANGE);
-			}
 
 			first_cpu = 0;
 			/* all but 16 LSB are reserved, treat them with care */
@@ -544,12 +535,7 @@
 		cpumask_set_cpu(j, covered_cpus);
 	}
 
-	for_each_cpu(k, policy->cpus) {
-		if (!cpu_online(k))
-			continue;
-		freqs.cpu = k;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	if (unlikely(retval)) {
 		/*
@@ -565,12 +551,8 @@
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu(j, policy->cpus) {
-			if (!cpu_online(j))
-				continue;
-			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-		}
+		cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+		cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 	}
 	retval = 0;
 
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index e29b59a..e2e5aa9 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -263,7 +263,6 @@
 {
 	unsigned int newstate = 0, policy_cpu;
 	struct cpufreq_freqs freqs;
-	int i;
 
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
 				target_freq, relation, &newstate))
@@ -272,7 +271,6 @@
 	policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
 	freqs.old = speedstep_get(policy_cpu);
 	freqs.new = speedstep_freqs[newstate].frequency;
-	freqs.cpu = policy->cpu;
 
 	pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new);
 
@@ -280,18 +278,12 @@
 	if (freqs.old == freqs.new)
 		return 0;
 
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
 				 true);
 
-	for_each_cpu(i, policy->cpus) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 6a457fc..f5a6b70 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -252,14 +252,13 @@
 
 	freqs.old = speedstep_freqs[speedstep_get_state()].frequency;
 	freqs.new = speedstep_freqs[newstate].frequency;
-	freqs.cpu = 0; /* speedstep.c is UP only driver */
 
 	if (freqs.old == freqs.new)
 		return 0;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 	speedstep_set_state(newstate);
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/drivers/cpufreq/tegra-cpufreq.c
similarity index 92%
rename from arch/arm/mach-tegra/cpu-tegra.c
rename to drivers/cpufreq/tegra-cpufreq.c
index e3d6e15..c74c0e1 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -1,6 +1,4 @@
 /*
- * arch/arm/mach-tegra/cpu-tegra.c
- *
  * Copyright (C) 2010 Google, Inc.
  *
  * Author:
@@ -106,7 +104,8 @@
 	return ret;
 }
 
-static int tegra_update_cpu_speed(unsigned long rate)
+static int tegra_update_cpu_speed(struct cpufreq_policy *policy,
+		unsigned long rate)
 {
 	int ret = 0;
 	struct cpufreq_freqs freqs;
@@ -128,8 +127,7 @@
 	else
 		clk_set_rate(emc_clk, 100000000);  /* emc 50Mhz */
 
-	for_each_online_cpu(freqs.cpu)
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 #ifdef CONFIG_CPU_FREQ_DEBUG
 	printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n",
@@ -143,8 +141,7 @@
 		return ret;
 	}
 
-	for_each_online_cpu(freqs.cpu)
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
@@ -181,7 +178,7 @@
 
 	target_cpu_speed[policy->cpu] = freq;
 
-	ret = tegra_update_cpu_speed(tegra_cpu_highest_speed());
+	ret = tegra_update_cpu_speed(policy, tegra_cpu_highest_speed());
 
 out:
 	mutex_unlock(&tegra_cpu_lock);
@@ -193,10 +190,12 @@
 {
 	mutex_lock(&tegra_cpu_lock);
 	if (event == PM_SUSPEND_PREPARE) {
+		struct cpufreq_policy *policy = cpufreq_cpu_get(0);
 		is_suspended = true;
 		pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n",
 			freq_table[0].frequency);
-		tegra_update_cpu_speed(freq_table[0].frequency);
+		tegra_update_cpu_speed(policy, freq_table[0].frequency);
+		cpufreq_cpu_put(policy);
 	} else if (event == PM_POST_SUSPEND) {
 		is_suspended = false;
 	}
diff --git a/arch/unicore32/kernel/cpu-ucv2.c b/drivers/cpufreq/unicore2-cpufreq.c
similarity index 89%
rename from arch/unicore32/kernel/cpu-ucv2.c
rename to drivers/cpufreq/unicore2-cpufreq.c
index 4a99f62..12fc904 100644
--- a/arch/unicore32/kernel/cpu-ucv2.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -1,5 +1,5 @@
 /*
- * linux/arch/unicore32/kernel/cpu-ucv2.c: clock scaling for the UniCore-II
+ * clock scaling for the UniCore-II
  *
  * Code specific to PKUnity SoC and UniCore ISA
  *
@@ -52,15 +52,14 @@
 	struct cpufreq_freqs freqs;
 	struct clk *mclk = clk_get(NULL, "MAIN_CLK");
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
 	if (!clk_set_rate(mclk, target_freq * 1000)) {
 		freqs.old = cur;
 		freqs.new = target_freq;
-		freqs.cpu = 0;
 	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 
 	return 0;
 }
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 071e2c3..c4cc27e 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,10 +39,4 @@
 	help
 	  Select this to enable cpuidle on Calxeda processors.
 
-config CPU_IDLE_KIRKWOOD
-	bool "CPU Idle Driver for Kirkwood processors"
-	depends on ARCH_KIRKWOOD
-	help
-	  Select this to enable cpuidle on Kirkwood processors.
-
 endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 24c6e7d..0d8bd55 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -6,4 +6,4 @@
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 
 obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
-obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o
+obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index e1aab38..2233791 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -1,7 +1,7 @@
 /*
  * Copyright 2012 Calxeda, Inc.
  *
- * Based on arch/arm/plat-mxc/cpuidle.c:
+ * Based on arch/arm/plat-mxc/cpuidle.c: #v3.7
  * Copyright 2012 Freescale Semiconductor, Inc.
  * Copyright 2012 Linaro Ltd.
  *
@@ -16,6 +16,8 @@
  *
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Maintainer: Rob Herring <rob.herring@calxeda.com>
  */
 
 #include <linux/cpuidle.h>
@@ -35,8 +37,6 @@
 extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
 extern void *scu_base_addr;
 
-static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
-
 static inline unsigned int get_auxcr(void)
 {
 	unsigned int val;
@@ -85,22 +85,8 @@
 	return index;
 }
 
-static void calxeda_idle_cpuidle_devices_uninit(void)
-{
-	int i;
-	struct cpuidle_device *dev;
-
-	for_each_possible_cpu(i) {
-		dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, i);
-		cpuidle_unregister_device(dev);
-	}
-
-	free_percpu(calxeda_idle_cpuidle_devices);
-}
-
 static struct cpuidle_driver calxeda_idle_driver = {
 	.name = "calxeda_idle",
-	.en_core_tk_irqen = 1,
 	.states = {
 		ARM_CPUIDLE_WFI_STATE,
 		{
@@ -118,44 +104,9 @@
 
 static int __init calxeda_cpuidle_init(void)
 {
-	int cpu_id;
-	int ret;
-	struct cpuidle_device *dev;
-	struct cpuidle_driver *drv = &calxeda_idle_driver;
-
 	if (!of_machine_is_compatible("calxeda,highbank"))
 		return -ENODEV;
 
-	ret = cpuidle_register_driver(drv);
-	if (ret)
-		return ret;
-
-	calxeda_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-	if (calxeda_idle_cpuidle_devices == NULL) {
-		ret = -ENOMEM;
-		goto unregister_drv;
-	}
-
-	/* initialize state data for each cpuidle_device */
-	for_each_possible_cpu(cpu_id) {
-		dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, cpu_id);
-		dev->cpu = cpu_id;
-		dev->state_count = drv->state_count;
-
-		ret = cpuidle_register_device(dev);
-		if (ret) {
-			pr_err("Failed to register cpu %u, error: %d\n",
-			       cpu_id, ret);
-			goto uninit;
-		}
-	}
-
-	return 0;
-
-uninit:
-	calxeda_idle_cpuidle_devices_uninit();
-unregister_drv:
-	cpuidle_unregister_driver(drv);
-	return ret;
+	return cpuidle_register(&calxeda_idle_driver, NULL);
 }
 module_init(calxeda_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 670aa1e..521b0a7 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -1,6 +1,4 @@
 /*
- * arch/arm/mach-kirkwood/cpuidle.c
- *
  * CPU idle Marvell Kirkwood SoCs
  *
  * This file is licensed under the terms of the GNU General Public
@@ -11,6 +9,9 @@
  * to implement two idle states -
  * #1 wait-for-interrupt
  * #2 wait-for-interrupt and DDR self refresh
+ *
+ * Maintainer: Jason Cooper <jason@lakedaemon.net>
+ * Maintainer: Andrew Lunn <andrew@lunn.ch>
  */
 
 #include <linux/kernel.h>
@@ -41,7 +42,6 @@
 static struct cpuidle_driver kirkwood_idle_driver = {
 	.name			= "kirkwood_idle",
 	.owner			= THIS_MODULE,
-	.en_core_tk_irqen	= 1,
 	.states[0]		= ARM_CPUIDLE_WFI_STATE,
 	.states[1]		= {
 		.enter			= kirkwood_enter_idle,
@@ -53,9 +53,6 @@
 	},
 	.state_count = KIRKWOOD_MAX_STATES,
 };
-static struct cpuidle_device *device;
-
-static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
 
 /* Initialize CPU idle by registering the idle states */
 static int kirkwood_cpuidle_probe(struct platform_device *pdev)
@@ -66,26 +63,16 @@
 	if (res == NULL)
 		return -EINVAL;
 
-	ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res);
-	if (!ddr_operation_base)
-		return -EADDRNOTAVAIL;
+	ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ddr_operation_base))
+		return PTR_ERR(ddr_operation_base);
 
-	device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
-	device->state_count = KIRKWOOD_MAX_STATES;
-
-	cpuidle_register_driver(&kirkwood_idle_driver);
-	if (cpuidle_register_device(device)) {
-		pr_err("kirkwood_init_cpuidle: Failed registering\n");
-		return -EIO;
-	}
-	return 0;
+	return cpuidle_register(&kirkwood_idle_driver, NULL);
 }
 
 int kirkwood_cpuidle_remove(struct platform_device *pdev)
 {
-	cpuidle_unregister_device(device);
-	cpuidle_unregister_driver(&kirkwood_idle_driver);
-
+	cpuidle_unregister(&kirkwood_idle_driver);
 	return 0;
 }
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index eba6929..c3a93fe 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -8,6 +8,7 @@
  * This code is licenced under the GPL.
  */
 
+#include <linux/clockchips.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
@@ -23,6 +24,7 @@
 #include "cpuidle.h"
 
 DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
+DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 
 DEFINE_MUTEX(cpuidle_lock);
 LIST_HEAD(cpuidle_detected_devices);
@@ -42,24 +44,6 @@
 
 static int __cpuidle_register_device(struct cpuidle_device *dev);
 
-static inline int cpuidle_enter(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv, int index)
-{
-	struct cpuidle_state *target_state = &drv->states[index];
-	return target_state->enter(dev, drv, index);
-}
-
-static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
-			       struct cpuidle_driver *drv, int index)
-{
-	return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
-}
-
-typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
-			       struct cpuidle_driver *drv, int index);
-
-static cpuidle_enter_t cpuidle_enter_ops;
-
 /**
  * cpuidle_play_dead - cpu off-lining
  *
@@ -89,11 +73,27 @@
  * @next_state: index into drv->states of the state to enter
  */
 int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
-		int next_state)
+			int index)
 {
 	int entered_state;
 
-	entered_state = cpuidle_enter_ops(dev, drv, next_state);
+	struct cpuidle_state *target_state = &drv->states[index];
+	ktime_t time_start, time_end;
+	s64 diff;
+
+	time_start = ktime_get();
+
+	entered_state = target_state->enter(dev, drv, index);
+
+	time_end = ktime_get();
+
+	local_irq_enable();
+
+	diff = ktime_to_us(ktime_sub(time_end, time_start));
+	if (diff > INT_MAX)
+		diff = INT_MAX;
+
+	dev->last_residency = (int) diff;
 
 	if (entered_state >= 0) {
 		/* Update cpuidle counters */
@@ -146,12 +146,20 @@
 
 	trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
+	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+				   &dev->cpu);
+
 	if (cpuidle_state_is_coupled(dev, drv, next_state))
 		entered_state = cpuidle_enter_state_coupled(dev, drv,
 							    next_state);
 	else
 		entered_state = cpuidle_enter_state(dev, drv, next_state);
 
+	if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
+		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+				   &dev->cpu);
+
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* give the governor an opportunity to reflect on the outcome */
@@ -222,37 +230,6 @@
 	mutex_unlock(&cpuidle_lock);
 }
 
-/**
- * cpuidle_wrap_enter - performs timekeeping and irqen around enter function
- * @dev: pointer to a valid cpuidle_device object
- * @drv: pointer to a valid cpuidle_driver object
- * @index: index of the target cpuidle state.
- */
-int cpuidle_wrap_enter(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv, int index,
-				int (*enter)(struct cpuidle_device *dev,
-					struct cpuidle_driver *drv, int index))
-{
-	ktime_t time_start, time_end;
-	s64 diff;
-
-	time_start = ktime_get();
-
-	index = enter(dev, drv, index);
-
-	time_end = ktime_get();
-
-	local_irq_enable();
-
-	diff = ktime_to_us(ktime_sub(time_end, time_start));
-	if (diff > INT_MAX)
-		diff = INT_MAX;
-
-	dev->last_residency = (int) diff;
-
-	return index;
-}
-
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
 static int poll_idle(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int index)
@@ -324,9 +301,6 @@
 			return ret;
 	}
 
-	cpuidle_enter_ops = drv->en_core_tk_irqen ?
-		cpuidle_enter_tk : cpuidle_enter;
-
 	poll_idle_init(drv);
 
 	ret = cpuidle_add_device_sysfs(dev);
@@ -480,6 +454,77 @@
 
 EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
 
+/**
+ * cpuidle_unregister: unregister a driver and the devices. This function
+ * can be used only if the driver has been previously registered through
+ * the cpuidle_register function.
+ *
+ * @drv: a valid pointer to a struct cpuidle_driver
+ */
+void cpuidle_unregister(struct cpuidle_driver *drv)
+{
+	int cpu;
+	struct cpuidle_device *device;
+
+	for_each_possible_cpu(cpu) {
+		device = &per_cpu(cpuidle_dev, cpu);
+		cpuidle_unregister_device(device);
+	}
+
+	cpuidle_unregister_driver(drv);
+}
+EXPORT_SYMBOL_GPL(cpuidle_unregister);
+
+/**
+ * cpuidle_register: registers the driver and the cpu devices with the
+ * coupled_cpus passed as parameter. This function covers the common
+ * initialization pattern shared by the arch-specific drivers. The
+ * devices are globally defined in this file.
+ *
+ * @drv         : a valid pointer to a struct cpuidle_driver
+ * @coupled_cpus: a cpumask for the coupled states
+ *
+ * Returns 0 on success, < 0 otherwise
+ */
+int cpuidle_register(struct cpuidle_driver *drv,
+		     const struct cpumask *const coupled_cpus)
+{
+	int ret, cpu;
+	struct cpuidle_device *device;
+
+	ret = cpuidle_register_driver(drv);
+	if (ret) {
+		pr_err("failed to register cpuidle driver\n");
+		return ret;
+	}
+
+	for_each_possible_cpu(cpu) {
+		device = &per_cpu(cpuidle_dev, cpu);
+		device->cpu = cpu;
+
+#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+		/*
+		 * On ARM multiplatform, the coupled idle states could be
+		 * enabled in the kernel even if the cpuidle driver does not
+		 * use them. Note, coupled_cpus is a struct copy.
+		 */
+		if (coupled_cpus)
+			device->coupled_cpus = *coupled_cpus;
+#endif
+		ret = cpuidle_register_device(device);
+		if (!ret)
+			continue;
+
+		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
+
+		cpuidle_unregister(drv);
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cpuidle_register);
+
 #ifdef CONFIG_SMP
 
 static void smp_callback(void *v)
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 422c7b6..8dfaaae 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -11,6 +11,8 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
 
 #include "cpuidle.h"
 
@@ -19,9 +21,28 @@
 static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
 static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
 
-static void __cpuidle_driver_init(struct cpuidle_driver *drv)
+static void cpuidle_setup_broadcast_timer(void *arg)
 {
+	int cpu = smp_processor_id();
+	clockevents_notify((long)(arg), &cpu);
+}
+
+static void __cpuidle_driver_init(struct cpuidle_driver *drv, int cpu)
+{
+	int i;
+
 	drv->refcnt = 0;
+
+	for (i = drv->state_count - 1; i >= 0 ; i--) {
+
+		if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
+			continue;
+
+		drv->bctimer = 1;
+		on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
+				 (void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
+		break;
+	}
 }
 
 static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
@@ -35,7 +56,7 @@
 	if (__cpuidle_get_cpu_driver(cpu))
 		return -EBUSY;
 
-	__cpuidle_driver_init(drv);
+	__cpuidle_driver_init(drv, cpu);
 
 	__cpuidle_set_cpu_driver(drv, cpu);
 
@@ -49,6 +70,12 @@
 
 	if (!WARN_ON(drv->refcnt > 0))
 		__cpuidle_set_cpu_driver(NULL, cpu);
+
+	if (drv->bctimer) {
+		drv->bctimer = 0;
+		on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
+				 (void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);
+	}
 }
 
 #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4d33874..a8117e6 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -350,11 +350,11 @@
 	if (!handle || acpi_bus_get_device(handle, &acpi_dev))
 		return;
 
-	if (acpi_is_video_device(acpi_dev))
+	if (acpi_is_video_device(handle))
 		acpi_video_bus = acpi_dev;
 	else {
 		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
-			if (acpi_is_video_device(acpi_cdev)) {
+			if (acpi_is_video_device(acpi_cdev->handle)) {
 				acpi_video_bus = acpi_cdev;
 				break;
 			}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index f61d98a..9c333d4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -39,6 +39,19 @@
 
 comment "Native drivers"
 
+config SENSORS_AB8500
+	tristate "AB8500 thermal monitoring"
+	depends on AB8500_GPADC && AB8500_BM
+	default n
+	help
+	  If you say yes here you get support for the thermal sensor part
+	  of the AB8500 chip. The driver includes thermal management for
+	  the AB8500 die and two GPADC channels. The GPADC channels are
+	  preferably used to access sensors outside the AB8500 chip.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called abx500-temp.
+
 config SENSORS_ABITUGURU
 	tristate "Abit uGuru (rev 1 & 2)"
 	depends on X86 && DMI
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index c51b0dc..d17d3e6 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_SENSORS_W83781D)	+= w83781d.o
 obj-$(CONFIG_SENSORS_W83791D)	+= w83791d.o
 
+obj-$(CONFIG_SENSORS_AB8500)	+= abx500.o ab8500.o
 obj-$(CONFIG_SENSORS_ABITUGURU)	+= abituguru.o
 obj-$(CONFIG_SENSORS_ABITUGURU3)+= abituguru3.o
 obj-$(CONFIG_SENSORS_AD7314)	+= ad7314.o
diff --git a/drivers/hwmon/ab8500.c b/drivers/hwmon/ab8500.c
new file mode 100644
index 0000000..d844dc8
--- /dev/null
+++ b/drivers/hwmon/ab8500.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) ST-Ericsson 2010 - 2013
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ *         Hongbo Zhang <hongbo.zhang@linaro.org>
+ * License Terms: GNU General Public License v2
+ *
+ * When the AB8500 thermal warning temperature is reached (the threshold
+ * cannot be changed by SW), an interrupt is set, and if no further action is
+ * taken within a certain time frame, pm_power_off will be called.
+ *
+ * When the AB8500 thermal shutdown temperature is reached, a hardware
+ * shutdown of the AB8500 will occur.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power/ab8500.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include "abx500.h"
+
+#define DEFAULT_POWER_OFF_DELAY	(HZ * 10)
+#define THERMAL_VCC		1800
+#define PULL_UP_RESISTOR	47000
+/* Number of monitored sensors should not be greater than NUM_SENSORS */
+#define NUM_MONITORED_SENSORS	4
+
+struct ab8500_gpadc_cfg {
+	const struct abx500_res_to_temp *temp_tbl;
+	int tbl_sz;
+	int vcc;
+	int r_up;
+};
+
+struct ab8500_temp {
+	struct ab8500_gpadc *gpadc;
+	struct ab8500_btemp *btemp;
+	struct delayed_work power_off_work;
+	struct ab8500_gpadc_cfg cfg;
+	struct abx500_temp *abx500_data;
+};
+
+/*
+ * The hardware connection is like this:
+ * VCC----[ R_up ]-----[ NTC ]----GND
+ * where R_up is the pull-up resistance and the GPADC measures the voltage on
+ * the NTC; the res_to_temp table is strictly sorted by falling resistance.
+ */
+static int ab8500_voltage_to_temp(struct ab8500_gpadc_cfg *cfg,
+		int v_ntc, int *temp)
+{
+	int r_ntc, i = 0, tbl_sz = cfg->tbl_sz;
+	const struct abx500_res_to_temp *tbl = cfg->temp_tbl;
+
+	if (cfg->vcc < 0 || v_ntc >= cfg->vcc)
+		return -EINVAL;
+
+	r_ntc = v_ntc * cfg->r_up / (cfg->vcc - v_ntc);
+	if (r_ntc > tbl[0].resist || r_ntc < tbl[tbl_sz - 1].resist)
+		return -EINVAL;
+
+	while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) &&
+			i < tbl_sz - 2)
+		i++;
+
+	/* return milli-Celsius */
+	*temp = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 *
+		(r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist);
+
+	return 0;
+}
+
+static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor, int *temp)
+{
+	int voltage, ret;
+	struct ab8500_temp *ab8500_data = data->plat_data;
+
+	if (sensor == BAT_CTRL) {
+		*temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp);
+	} else if (sensor == BTEMP_BALL) {
+		*temp = ab8500_btemp_get_temp(ab8500_data->btemp);
+	} else {
+		voltage = ab8500_gpadc_convert(ab8500_data->gpadc, sensor);
+		if (voltage < 0)
+			return voltage;
+
+		ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void ab8500_thermal_power_off(struct work_struct *work)
+{
+	struct ab8500_temp *ab8500_data = container_of(work,
+				struct ab8500_temp, power_off_work.work);
+	struct abx500_temp *abx500_data = ab8500_data->abx500_data;
+
+	dev_warn(&abx500_data->pdev->dev, "Power off due to critical temp\n");
+
+	pm_power_off();
+}
+
+static ssize_t ab8500_show_name(struct device *dev,
+		struct device_attribute *devattr, char *buf)
+{
+	return sprintf(buf, "ab8500\n");
+}
+
+static ssize_t ab8500_show_label(struct device *dev,
+		struct device_attribute *devattr, char *buf)
+{
+	char *label;
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	int index = attr->index;
+
+	switch (index) {
+	case 1:
+		label = "ext_adc1";
+		break;
+	case 2:
+		label = "ext_adc2";
+		break;
+	case 3:
+		label = "bat_temp";
+		break;
+	case 4:
+		label = "bat_ctrl";
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return sprintf(buf, "%s\n", label);
+}
+
+static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data)
+{
+	struct ab8500_temp *ab8500_data = data->plat_data;
+
+	dev_warn(&data->pdev->dev, "Power off in %d s\n",
+		 DEFAULT_POWER_OFF_DELAY / HZ);
+
+	schedule_delayed_work(&ab8500_data->power_off_work,
+		DEFAULT_POWER_OFF_DELAY);
+	return 0;
+}
+
+int abx500_hwmon_init(struct abx500_temp *data)
+{
+	struct ab8500_temp *ab8500_data;
+
+	ab8500_data = devm_kzalloc(&data->pdev->dev, sizeof(*ab8500_data),
+		GFP_KERNEL);
+	if (!ab8500_data)
+		return -ENOMEM;
+
+	ab8500_data->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	if (IS_ERR(ab8500_data->gpadc))
+		return PTR_ERR(ab8500_data->gpadc);
+
+	ab8500_data->btemp = ab8500_btemp_get();
+	if (IS_ERR(ab8500_data->btemp))
+		return PTR_ERR(ab8500_data->btemp);
+
+	INIT_DELAYED_WORK(&ab8500_data->power_off_work,
+			  ab8500_thermal_power_off);
+
+	ab8500_data->cfg.vcc = THERMAL_VCC;
+	ab8500_data->cfg.r_up = PULL_UP_RESISTOR;
+	ab8500_data->cfg.temp_tbl = ab8500_temp_tbl_a_thermistor;
+	ab8500_data->cfg.tbl_sz = ab8500_temp_tbl_a_size;
+
+	data->plat_data = ab8500_data;
+
+	/*
+	 * ADC_AUX1 and ADC_AUX2, connected to external NTC
+	 * BTEMP_BALL and BAT_CTRL, fixed usage
+	 */
+	data->gpadc_addr[0] = ADC_AUX1;
+	data->gpadc_addr[1] = ADC_AUX2;
+	data->gpadc_addr[2] = BTEMP_BALL;
+	data->gpadc_addr[3] = BAT_CTRL;
+	data->monitored_sensors = NUM_MONITORED_SENSORS;
+
+	data->ops.read_sensor = ab8500_read_sensor;
+	data->ops.irq_handler = ab8500_temp_irq_handler;
+	data->ops.show_name = ab8500_show_name;
+	data->ops.show_label = ab8500_show_label;
+	data->ops.is_visible = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(abx500_hwmon_init);
+
+MODULE_AUTHOR("Hongbo Zhang <hongbo.zhang@linaro.org>");
+MODULE_DESCRIPTION("AB8500 temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
new file mode 100644
index 0000000..b4ad87b
--- /dev/null
+++ b/drivers/hwmon/abx500.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (C) ST-Ericsson 2010 - 2013
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ *         Hongbo Zhang <hongbo.zhang@linaro.org>
+ * License Terms: GNU General Public License v2
+ *
+ * ABX500 does not provide auto ADC, so to monitor the required temperatures,
+ * a periodic work is used. It is more important to not wake up the CPU than
+ * to perform this job, hence the use of a deferred delay.
+ *
+ * A deferred delay for the thermal monitor is considered safe because:
+ * If the chip gets too hot during a sleep state it's most likely due to
+ * external factors, such as the surrounding temperature. I.e. no SW decisions
+ * will make any difference.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include "abx500.h"
+
+#define DEFAULT_MONITOR_DELAY	HZ
+#define DEFAULT_MAX_TEMP	130
+
+static inline void schedule_monitor(struct abx500_temp *data)
+{
+	data->work_active = true;
+	schedule_delayed_work(&data->work, DEFAULT_MONITOR_DELAY);
+}
+
+static void threshold_updated(struct abx500_temp *data)
+{
+	int i;
+	for (i = 0; i < data->monitored_sensors; i++)
+		if (data->max[i] != 0 || data->min[i] != 0) {
+			schedule_monitor(data);
+			return;
+		}
+
+	dev_dbg(&data->pdev->dev, "No active thresholds.\n");
+	cancel_delayed_work_sync(&data->work);
+	data->work_active = false;
+}
+
+static void gpadc_monitor(struct work_struct *work)
+{
+	int temp, i, ret;
+	char alarm_node[30];
+	bool updated_min_alarm, updated_max_alarm;
+	struct abx500_temp *data;
+
+	data = container_of(work, struct abx500_temp, work.work);
+	mutex_lock(&data->lock);
+
+	for (i = 0; i < data->monitored_sensors; i++) {
+		/* Thresholds are considered inactive if set to 0 */
+		if (data->max[i] == 0 && data->min[i] == 0)
+			continue;
+
+		if (data->max[i] < data->min[i])
+			continue;
+
+		ret = data->ops.read_sensor(data, data->gpadc_addr[i], &temp);
+		if (ret < 0) {
+			dev_err(&data->pdev->dev, "GPADC read failed\n");
+			continue;
+		}
+
+		updated_min_alarm = false;
+		updated_max_alarm = false;
+
+		if (data->min[i] != 0) {
+			if (temp < data->min[i]) {
+				if (data->min_alarm[i] == false) {
+					data->min_alarm[i] = true;
+					updated_min_alarm = true;
+				}
+			} else {
+				if (data->min_alarm[i] == true) {
+					data->min_alarm[i] = false;
+					updated_min_alarm = true;
+				}
+			}
+		}
+		if (data->max[i] != 0) {
+			if (temp > data->max[i]) {
+				if (data->max_alarm[i] == false) {
+					data->max_alarm[i] = true;
+					updated_max_alarm = true;
+				}
+			} else if (temp < data->max[i] - data->max_hyst[i]) {
+				if (data->max_alarm[i] == true) {
+					data->max_alarm[i] = false;
+					updated_max_alarm = true;
+				}
+			}
+		}
+
+		if (updated_min_alarm) {
+			ret = sprintf(alarm_node, "temp%d_min_alarm", i + 1);
+			sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+		}
+		if (updated_max_alarm) {
+			ret = sprintf(alarm_node, "temp%d_max_alarm", i + 1);
+			sysfs_notify(&data->pdev->dev.kobj, NULL, alarm_node);
+		}
+	}
+
+	schedule_monitor(data);
+	mutex_unlock(&data->lock);
+}
+
+/* HWMON sysfs interfaces */
+static ssize_t show_name(struct device *dev, struct device_attribute *devattr,
+			 char *buf)
+{
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	/* Show chip name */
+	return data->ops.show_name(dev, devattr, buf);
+}
+
+static ssize_t show_label(struct device *dev,
+			  struct device_attribute *devattr, char *buf)
+{
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	/* Show each sensor label */
+	return data->ops.show_label(dev, devattr, buf);
+}
+
+static ssize_t show_input(struct device *dev,
+			  struct device_attribute *devattr, char *buf)
+{
+	int ret, temp;
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	u8 gpadc_addr = data->gpadc_addr[attr->index];
+
+	ret = data->ops.read_sensor(data, gpadc_addr, &temp);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%d\n", temp);
+}
+
+/* Set functions (RW nodes) */
+static ssize_t set_min(struct device *dev, struct device_attribute *devattr,
+		       const char *buf, size_t count)
+{
+	unsigned long val;
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	int res = kstrtol(buf, 10, &val);
+	if (res < 0)
+		return res;
+
+	val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
+
+	mutex_lock(&data->lock);
+	data->min[attr->index] = val;
+	threshold_updated(data);
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+static ssize_t set_max(struct device *dev, struct device_attribute *devattr,
+		       const char *buf, size_t count)
+{
+	unsigned long val;
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	int res = kstrtol(buf, 10, &val);
+	if (res < 0)
+		return res;
+
+	val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
+
+	mutex_lock(&data->lock);
+	data->max[attr->index] = val;
+	threshold_updated(data);
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+static ssize_t set_max_hyst(struct device *dev,
+			    struct device_attribute *devattr,
+			    const char *buf, size_t count)
+{
+	unsigned long val;
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	int res = kstrtoul(buf, 10, &val);
+	if (res < 0)
+		return res;
+
+	val = clamp_val(val, 0, DEFAULT_MAX_TEMP);
+
+	mutex_lock(&data->lock);
+	data->max_hyst[attr->index] = val;
+	threshold_updated(data);
+	mutex_unlock(&data->lock);
+
+	return count;
+}
+
+/* Show functions (RO nodes) */
+static ssize_t show_min(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+	return sprintf(buf, "%ld\n", data->min[attr->index]);
+}
+
+static ssize_t show_max(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+	return sprintf(buf, "%ld\n", data->max[attr->index]);
+}
+
+static ssize_t show_max_hyst(struct device *dev,
+			     struct device_attribute *devattr, char *buf)
+{
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+	return sprintf(buf, "%ld\n", data->max_hyst[attr->index]);
+}
+
+static ssize_t show_min_alarm(struct device *dev,
+			      struct device_attribute *devattr, char *buf)
+{
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+	return sprintf(buf, "%d\n", data->min_alarm[attr->index]);
+}
+
+static ssize_t show_max_alarm(struct device *dev,
+			      struct device_attribute *devattr, char *buf)
+{
+	struct abx500_temp *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+
+	return sprintf(buf, "%d\n", data->max_alarm[attr->index]);
+}
+
+static umode_t abx500_attrs_visible(struct kobject *kobj,
+				   struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct abx500_temp *data = dev_get_drvdata(dev);
+
+	if (data->ops.is_visible)
+		return data->ops.is_visible(attr, n);
+
+	return attr->mode;
+}
+
+/* Chip name, required by hwmon */
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
+
+/* GPADC - SENSOR1 */
+static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, show_label, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_min, set_min, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_max, set_max, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
+			  show_max_hyst, set_max_hyst, 0);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_min_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_max_alarm, NULL, 0);
+
+/* GPADC - SENSOR2 */
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_min, set_min, 1);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_max, set_max, 1);
+static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IWUSR | S_IRUGO,
+			  show_max_hyst, set_max_hyst, 1);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_min_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_max_alarm, NULL, 1);
+
+/* GPADC - SENSOR3 */
+static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, show_label, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_min, set_min, 2);
+static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_max, set_max, 2);
+static SENSOR_DEVICE_ATTR(temp3_max_hyst, S_IWUSR | S_IRUGO,
+			  show_max_hyst, set_max_hyst, 2);
+static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_min_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_max_alarm, NULL, 2);
+
+/* GPADC - SENSOR4 */
+static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, show_label, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_input, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_min, S_IWUSR | S_IRUGO, show_min, set_min, 3);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_max, set_max, 3);
+static SENSOR_DEVICE_ATTR(temp4_max_hyst, S_IWUSR | S_IRUGO,
+			  show_max_hyst, set_max_hyst, 3);
+static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_min_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_max_alarm, NULL, 3);
+
+struct attribute *abx500_temp_attributes[] = {
+	&sensor_dev_attr_name.dev_attr.attr,
+
+	&sensor_dev_attr_temp1_label.dev_attr.attr,
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_min.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+
+	&sensor_dev_attr_temp2_label.dev_attr.attr,
+	&sensor_dev_attr_temp2_input.dev_attr.attr,
+	&sensor_dev_attr_temp2_min.dev_attr.attr,
+	&sensor_dev_attr_temp2_max.dev_attr.attr,
+	&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+
+	&sensor_dev_attr_temp3_label.dev_attr.attr,
+	&sensor_dev_attr_temp3_input.dev_attr.attr,
+	&sensor_dev_attr_temp3_min.dev_attr.attr,
+	&sensor_dev_attr_temp3_max.dev_attr.attr,
+	&sensor_dev_attr_temp3_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
+
+	&sensor_dev_attr_temp4_label.dev_attr.attr,
+	&sensor_dev_attr_temp4_input.dev_attr.attr,
+	&sensor_dev_attr_temp4_min.dev_attr.attr,
+	&sensor_dev_attr_temp4_max.dev_attr.attr,
+	&sensor_dev_attr_temp4_max_hyst.dev_attr.attr,
+	&sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+	&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group abx500_temp_group = {
+	.attrs = abx500_temp_attributes,
+	.is_visible = abx500_attrs_visible,
+};
+
+static irqreturn_t abx500_temp_irq_handler(int irq, void *irq_data)
+{
+	struct platform_device *pdev = irq_data;
+	struct abx500_temp *data = platform_get_drvdata(pdev);
+
+	data->ops.irq_handler(irq, data);
+	return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct platform_device *pdev)
+{
+	int ret;
+	int irq = platform_get_irq_byname(pdev, "ABX500_TEMP_WARM");
+
+	if (irq < 0) {
+		dev_err(&pdev->dev, "Get irq by name failed\n");
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+		abx500_temp_irq_handler, IRQF_NO_SUSPEND, "abx500-temp", pdev);
+	if (ret < 0)
+		dev_err(&pdev->dev, "Request threaded irq failed (%d)\n", ret);
+
+	return ret;
+}
+
+static int abx500_temp_probe(struct platform_device *pdev)
+{
+	struct abx500_temp *data;
+	int err;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->pdev = pdev;
+	mutex_init(&data->lock);
+
+	/* Chip specific initialization */
+	err = abx500_hwmon_init(data);
+	if (err < 0 || !data->ops.read_sensor || !data->ops.show_name ||
+			!data->ops.show_label)
+		return err;
+
+	INIT_DEFERRABLE_WORK(&data->work, gpadc_monitor);
+
+	platform_set_drvdata(pdev, data);
+
+	err = sysfs_create_group(&pdev->dev.kobj, &abx500_temp_group);
+	if (err < 0) {
+		dev_err(&pdev->dev, "Create sysfs group failed (%d)\n", err);
+		return err;
+	}
+
+	data->hwmon_dev = hwmon_device_register(&pdev->dev);
+	if (IS_ERR(data->hwmon_dev)) {
+		err = PTR_ERR(data->hwmon_dev);
+		dev_err(&pdev->dev, "Class registration failed (%d)\n", err);
+		goto exit_sysfs_group;
+	}
+
+	if (data->ops.irq_handler) {
+		err = setup_irqs(pdev);
+		if (err < 0)
+			goto exit_hwmon_reg;
+	}
+	return 0;
+
+exit_hwmon_reg:
+	hwmon_device_unregister(data->hwmon_dev);
+exit_sysfs_group:
+	sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+	return err;
+}
+
+static int abx500_temp_remove(struct platform_device *pdev)
+{
+	struct abx500_temp *data = platform_get_drvdata(pdev);
+
+	cancel_delayed_work_sync(&data->work);
+	hwmon_device_unregister(data->hwmon_dev);
+	sysfs_remove_group(&pdev->dev.kobj, &abx500_temp_group);
+
+	return 0;
+}
+
+static int abx500_temp_suspend(struct platform_device *pdev,
+			       pm_message_t state)
+{
+	struct abx500_temp *data = platform_get_drvdata(pdev);
+
+	if (data->work_active)
+		cancel_delayed_work_sync(&data->work);
+
+	return 0;
+}
+
+static int abx500_temp_resume(struct platform_device *pdev)
+{
+	struct abx500_temp *data = platform_get_drvdata(pdev);
+
+	if (data->work_active)
+		schedule_monitor(data);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id abx500_temp_match[] = {
+	{ .compatible = "stericsson,abx500-temp" },
+	{},
+};
+#endif
+
+static struct platform_driver abx500_temp_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "abx500-temp",
+		.of_match_table = of_match_ptr(abx500_temp_match),
+	},
+	.suspend = abx500_temp_suspend,
+	.resume = abx500_temp_resume,
+	.probe = abx500_temp_probe,
+	.remove = abx500_temp_remove,
+};
+
+module_platform_driver(abx500_temp_driver);
+
+MODULE_AUTHOR("Martin Persson <martin.persson@stericsson.com>");
+MODULE_DESCRIPTION("ABX500 temperature driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/abx500.h b/drivers/hwmon/abx500.h
new file mode 100644
index 0000000..9b295e6
--- /dev/null
+++ b/drivers/hwmon/abx500.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) ST-Ericsson 2010 - 2013
+ * License terms: GNU General Public License v2
+ * Author: Martin Persson <martin.persson@stericsson.com>
+ *         Hongbo Zhang <hongbo.zhang@linaro.com>
+ */
+
+#ifndef _ABX500_H
+#define _ABX500_H
+
+#define NUM_SENSORS 5
+
+struct abx500_temp;
+
+/*
+ * struct abx500_temp_ops - abx500 chip specific ops
+ * @read_sensor: reads gpadc output
+ * @irq_handler: irq handler
+ * @show_name: hwmon device name
+ * @show_label: hwmon attribute label
+ * @is_visible: is attribute visible
+ */
+struct abx500_temp_ops {
+	int (*read_sensor)(struct abx500_temp *, u8, int *);
+	int (*irq_handler)(int, struct abx500_temp *);
+	ssize_t (*show_name)(struct device *,
+			struct device_attribute *, char *);
+	ssize_t (*show_label) (struct device *,
+			struct device_attribute *, char *);
+	int (*is_visible)(struct attribute *, int);
+};
+
+/*
+ * struct abx500_temp - representation of temp mon device
+ * @pdev: platform device
+ * @hwmon_dev: hwmon device
+ * @ops: abx500 chip specific ops
+ * @gpadc_addr: gpadc channel address
+ * @min: sensor temperature min value
+ * @max: sensor temperature max value
+ * @max_hyst: sensor temperature hysteresis value for max limit
+ * @min_alarm: sensor temperature min alarm
+ * @max_alarm: sensor temperature max alarm
+ * @work: delayed work scheduled to monitor temperature periodically
+ * @work_active: True if work is active
+ * @lock: mutex
+ * @monitored_sensors: number of monitored sensors
+ * @plat_data: private usage, usually points to platform specific data
+ */
+struct abx500_temp {
+	struct platform_device *pdev;
+	struct device *hwmon_dev;
+	struct abx500_temp_ops ops;
+	u8 gpadc_addr[NUM_SENSORS];
+	unsigned long min[NUM_SENSORS];
+	unsigned long max[NUM_SENSORS];
+	unsigned long max_hyst[NUM_SENSORS];
+	bool min_alarm[NUM_SENSORS];
+	bool max_alarm[NUM_SENSORS];
+	struct delayed_work work;
+	bool work_active;
+	struct mutex lock;
+	int monitored_sensors;
+	void *plat_data;
+};
+
+int abx500_hwmon_init(struct abx500_temp *data);
+
+#endif /* _ABX500_H */
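
For reference, the abx500_temp_ops split keeps abx500.c generic while each PMIC
backend supplies its own GPADC access through abx500_hwmon_init(). A minimal
sketch of such a backend follows, assuming a hypothetical chip; the my_chip_*
helpers, channel address and sensor count are invented for illustration and are
not the actual companion driver in this series:

#include <linux/device.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include "abx500.h"

/* Hypothetical GPADC read: report the temperature for one channel. */
static int my_chip_read_sensor(struct abx500_temp *data, u8 gpadc_addr,
			       int *temp)
{
	*temp = 25000;	/* would come from the chip's GPADC driver */
	return 0;
}

static ssize_t my_chip_show_name(struct device *dev,
				 struct device_attribute *devattr, char *buf)
{
	return sprintf(buf, "my_chip\n");
}

static ssize_t my_chip_show_label(struct device *dev,
				  struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);

	return sprintf(buf, "sensor%d\n", attr->index + 1);
}

int abx500_hwmon_init(struct abx500_temp *data)
{
	/* The probe path requires these three ops; the others are optional. */
	data->ops.read_sensor = my_chip_read_sensor;
	data->ops.show_name = my_chip_show_name;
	data->ops.show_label = my_chip_show_label;

	data->gpadc_addr[0] = 0x01;	/* invented channel address */
	data->monitored_sensors = 1;

	return 0;
}
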
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1a38dd7..0e8fab1 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -71,7 +71,6 @@
 static struct cpuidle_driver intel_idle_driver = {
 	.name = "intel_idle",
 	.owner = THIS_MODULE,
-	.en_core_tk_irqen = 1,
 };
 /* intel_idle.max_cstate=0 disables driver */
 static int max_cstate = CPUIDLE_STATE_MAX - 1;
@@ -339,7 +338,6 @@
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
-	stop_critical_timings();
 	if (!need_resched()) {
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -348,8 +346,6 @@
 			__mwait(eax, ecx);
 	}
 
-	start_critical_timings();
-
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a0f29c1..c85b56c 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -59,5 +59,6 @@
 source "drivers/infiniband/ulp/srpt/Kconfig"
 
 source "drivers/infiniband/ulp/iser/Kconfig"
+source "drivers/infiniband/ulp/isert/Kconfig"
 
 endif # INFINIBAND
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index bf846a1..b126fef 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -13,3 +13,4 @@
 obj-$(CONFIG_INFINIBAND_SRP)		+= ulp/srp/
 obj-$(CONFIG_INFINIBAND_SRPT)		+= ulp/srpt/
 obj-$(CONFIG_INFINIBAND_ISER)		+= ulp/iser/
+obj-$(CONFIG_INFINIBAND_ISERT)		+= ulp/isert/
diff --git a/drivers/infiniband/ulp/isert/Kconfig b/drivers/infiniband/ulp/isert/Kconfig
new file mode 100644
index 0000000..ce3fd32
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Kconfig
@@ -0,0 +1,5 @@
+config INFINIBAND_ISERT
+	tristate "iSCSI Extentions for RDMA (iSER) target support"
+	depends on INET && INFINIBAND_ADDR_TRANS && TARGET_CORE && ISCSI_TARGET
+	---help---
+	Support for iSCSI Extensions for RDMA (iSER) Target on InfiniBand fabrics.
diff --git a/drivers/infiniband/ulp/isert/Makefile b/drivers/infiniband/ulp/isert/Makefile
new file mode 100644
index 0000000..c8bf242
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/Makefile
@@ -0,0 +1,2 @@
+ccflags-y		:= -Idrivers/target -Idrivers/target/iscsi
+obj-$(CONFIG_INFINIBAND_ISERT)	+= ib_isert.o
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
new file mode 100644
index 0000000..41712f0
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -0,0 +1,2281 @@
+/*******************************************************************************
+ * This file contains iSCSI extensions for RDMA (iSER) Verbs
+ *
+ * (c) Copyright 2013 RisingTide Systems LLC.
+ *
+ * Nicholas A. Bellinger <nab@linux-iscsi.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
+
+#include "isert_proto.h"
+#include "ib_isert.h"
+
+#define	ISERT_MAX_CONN		8
+#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
+
+static DEFINE_MUTEX(device_list_mutex);
+static LIST_HEAD(device_list);
+static struct workqueue_struct *isert_rx_wq;
+static struct workqueue_struct *isert_comp_wq;
+static struct kmem_cache *isert_cmd_cache;
+
+static void
+isert_qp_event_callback(struct ib_event *e, void *context)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)context;
+
+	pr_err("isert_qp_event_callback event: %d\n", e->event);
+	switch (e->event) {
+	case IB_EVENT_COMM_EST:
+		rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
+		break;
+	case IB_EVENT_QP_LAST_WQE_REACHED:
+		pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
+{
+	int ret;
+
+	ret = ib_query_device(ib_dev, devattr);
+	if (ret) {
+		pr_err("ib_query_device() failed: %d\n", ret);
+		return ret;
+	}
+	pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
+	pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+
+	return 0;
+}
+
+static int
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+{
+	struct isert_device *device = isert_conn->conn_device;
+	struct ib_qp_init_attr attr;
+	struct ib_device_attr devattr;
+	int ret, index, min_index = 0;
+
+	memset(&devattr, 0, sizeof(struct ib_device_attr));
+	ret = isert_query_device(cma_id->device, &devattr);
+	if (ret)
+		return ret;
+
+	mutex_lock(&device_list_mutex);
+	for (index = 0; index < device->cqs_used; index++)
+		if (device->cq_active_qps[index] <
+		    device->cq_active_qps[min_index])
+			min_index = index;
+	device->cq_active_qps[min_index]++;
+	pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+	mutex_unlock(&device_list_mutex);
+
+	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
+	attr.event_handler = isert_qp_event_callback;
+	attr.qp_context = isert_conn;
+	attr.send_cq = device->dev_tx_cq[min_index];
+	attr.recv_cq = device->dev_rx_cq[min_index];
+	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
+	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+	/*
+	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
+	 * work-around for RDMA_READ..
+	 */
+	attr.cap.max_send_sge = devattr.max_sge - 2;
+	isert_conn->max_sge = attr.cap.max_send_sge;
+
+	attr.cap.max_recv_sge = 1;
+	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+	attr.qp_type = IB_QPT_RC;
+
+	pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
+		 cma_id->device);
+	pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
+		 isert_conn->conn_pd->device);
+
+	ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
+	if (ret) {
+		pr_err("rdma_create_qp failed for cma_id %d\n", ret);
+		return ret;
+	}
+	isert_conn->conn_qp = cma_id->qp;
+	pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
+
+	return 0;
+}
+
+static void
+isert_cq_event_callback(struct ib_event *e, void *context)
+{
+	pr_debug("isert_cq_event_callback event: %d\n", e->event);
+}
+
+static int
+isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iser_rx_desc *rx_desc;
+	struct ib_sge *rx_sg;
+	u64 dma_addr;
+	int i, j;
+
+	isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
+				sizeof(struct iser_rx_desc), GFP_KERNEL);
+	if (!isert_conn->conn_rx_descs)
+		goto fail;
+
+	rx_desc = isert_conn->conn_rx_descs;
+
+	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
+		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
+					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+		if (ib_dma_mapping_error(ib_dev, dma_addr))
+			goto dma_map_fail;
+
+		rx_desc->dma_addr = dma_addr;
+
+		rx_sg = &rx_desc->rx_sg;
+		rx_sg->addr = rx_desc->dma_addr;
+		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
+		rx_sg->lkey = isert_conn->conn_mr->lkey;
+	}
+
+	isert_conn->conn_rx_desc_head = 0;
+	return 0;
+
+dma_map_fail:
+	rx_desc = isert_conn->conn_rx_descs;
+	for (j = 0; j < i; j++, rx_desc++) {
+		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
+				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+	}
+	kfree(isert_conn->conn_rx_descs);
+	isert_conn->conn_rx_descs = NULL;
+fail:
+	return -ENOMEM;
+}
+
+static void
+isert_free_rx_descriptors(struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iser_rx_desc *rx_desc;
+	int i;
+
+	if (!isert_conn->conn_rx_descs)
+		return;
+
+	rx_desc = isert_conn->conn_rx_descs;
+	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
+		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
+				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+	}
+
+	kfree(isert_conn->conn_rx_descs);
+	isert_conn->conn_rx_descs = NULL;
+}
+
+static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_callback(struct ib_cq *, void *);
+
+static int
+isert_create_device_ib_res(struct isert_device *device)
+{
+	struct ib_device *ib_dev = device->ib_device;
+	struct isert_cq_desc *cq_desc;
+	int ret = 0, i, j;
+
+	device->cqs_used = min_t(int, num_online_cpus(),
+				 device->ib_device->num_comp_vectors);
+	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
+	pr_debug("Using %d CQs, device %s supports %d vectors\n",
+		 device->cqs_used, device->ib_device->name,
+		 device->ib_device->num_comp_vectors);
+	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
+				device->cqs_used, GFP_KERNEL);
+	if (!device->cq_desc) {
+		pr_err("Unable to allocate device->cq_desc\n");
+		return -ENOMEM;
+	}
+	cq_desc = device->cq_desc;
+
+	device->dev_pd = ib_alloc_pd(ib_dev);
+	if (IS_ERR(device->dev_pd)) {
+		ret = PTR_ERR(device->dev_pd);
+		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
+		goto out_cq_desc;
+	}
+
+	for (i = 0; i < device->cqs_used; i++) {
+		cq_desc[i].device = device;
+		cq_desc[i].cq_index = i;
+
+		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
+						isert_cq_rx_callback,
+						isert_cq_event_callback,
+						(void *)&cq_desc[i],
+						ISER_MAX_RX_CQ_LEN, i);
+		if (IS_ERR(device->dev_rx_cq[i]))
+			goto out_cq;
+
+		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
+						isert_cq_tx_callback,
+						isert_cq_event_callback,
+						(void *)&cq_desc[i],
+						ISER_MAX_TX_CQ_LEN, i);
+		if (IS_ERR(device->dev_tx_cq[i]))
+			goto out_cq;
+
+		if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+			goto out_cq;
+
+		if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+			goto out_cq;
+	}
+
+	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(device->dev_mr)) {
+		ret = PTR_ERR(device->dev_mr);
+		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
+		goto out_cq;
+	}
+
+	return 0;
+
+out_cq:
+	for (j = 0; j < i; j++) {
+		cq_desc = &device->cq_desc[j];
+
+		if (device->dev_rx_cq[j]) {
+			cancel_work_sync(&cq_desc->cq_rx_work);
+			ib_destroy_cq(device->dev_rx_cq[j]);
+		}
+		if (device->dev_tx_cq[j]) {
+			cancel_work_sync(&cq_desc->cq_tx_work);
+			ib_destroy_cq(device->dev_tx_cq[j]);
+		}
+	}
+	ib_dealloc_pd(device->dev_pd);
+
+out_cq_desc:
+	kfree(device->cq_desc);
+
+	return ret;
+}
+
+static void
+isert_free_device_ib_res(struct isert_device *device)
+{
+	struct isert_cq_desc *cq_desc;
+	int i;
+
+	for (i = 0; i < device->cqs_used; i++) {
+		cq_desc = &device->cq_desc[i];
+
+		cancel_work_sync(&cq_desc->cq_rx_work);
+		cancel_work_sync(&cq_desc->cq_tx_work);
+		ib_destroy_cq(device->dev_rx_cq[i]);
+		ib_destroy_cq(device->dev_tx_cq[i]);
+		device->dev_rx_cq[i] = NULL;
+		device->dev_tx_cq[i] = NULL;
+	}
+
+	ib_dereg_mr(device->dev_mr);
+	ib_dealloc_pd(device->dev_pd);
+	kfree(device->cq_desc);
+}
+
+static void
+isert_device_try_release(struct isert_device *device)
+{
+	mutex_lock(&device_list_mutex);
+	device->refcount--;
+	if (!device->refcount) {
+		isert_free_device_ib_res(device);
+		list_del(&device->dev_node);
+		kfree(device);
+	}
+	mutex_unlock(&device_list_mutex);
+}
+
+static struct isert_device *
+isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
+{
+	struct isert_device *device;
+	int ret;
+
+	mutex_lock(&device_list_mutex);
+	list_for_each_entry(device, &device_list, dev_node) {
+		if (device->ib_device->node_guid == cma_id->device->node_guid) {
+			device->refcount++;
+			mutex_unlock(&device_list_mutex);
+			return device;
+		}
+	}
+
+	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
+	if (!device) {
+		mutex_unlock(&device_list_mutex);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	INIT_LIST_HEAD(&device->dev_node);
+
+	device->ib_device = cma_id->device;
+	ret = isert_create_device_ib_res(device);
+	if (ret) {
+		kfree(device);
+		mutex_unlock(&device_list_mutex);
+		return ERR_PTR(ret);
+	}
+
+	device->refcount++;
+	list_add_tail(&device->dev_node, &device_list);
+	mutex_unlock(&device_list_mutex);
+
+	return device;
+}
+
+static int
+isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+	struct iscsi_np *np = cma_id->context;
+	struct isert_np *isert_np = np->np_context;
+	struct isert_conn *isert_conn;
+	struct isert_device *device;
+	struct ib_device *ib_dev = cma_id->device;
+	int ret = 0;
+
+	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+		 cma_id, cma_id->context);
+
+	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
+	if (!isert_conn) {
+		pr_err("Unable to allocate isert_conn\n");
+		return -ENOMEM;
+	}
+	isert_conn->state = ISER_CONN_INIT;
+	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
+	init_completion(&isert_conn->conn_login_comp);
+	init_waitqueue_head(&isert_conn->conn_wait);
+	init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+	kref_init(&isert_conn->conn_kref);
+	kref_get(&isert_conn->conn_kref);
+
+	cma_id->context = isert_conn;
+	isert_conn->conn_cm_id = cma_id;
+	isert_conn->responder_resources = event->param.conn.responder_resources;
+	isert_conn->initiator_depth = event->param.conn.initiator_depth;
+	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
+		 isert_conn->responder_resources, isert_conn->initiator_depth);
+
+	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+	if (!isert_conn->login_buf) {
+		pr_err("Unable to allocate isert_conn->login_buf\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	isert_conn->login_req_buf = isert_conn->login_buf;
+	isert_conn->login_rsp_buf = isert_conn->login_buf +
+				    ISCSI_DEF_MAX_RECV_SEG_LEN;
+	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+		 isert_conn->login_buf, isert_conn->login_req_buf,
+		 isert_conn->login_rsp_buf);
+
+	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
+				(void *)isert_conn->login_req_buf,
+				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
+
+	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
+	if (ret) {
+		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+		       ret);
+		isert_conn->login_req_dma = 0;
+		goto out_login_buf;
+	}
+
+	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
+					(void *)isert_conn->login_rsp_buf,
+					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+
+	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
+	if (ret) {
+		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+		       ret);
+		isert_conn->login_rsp_dma = 0;
+		goto out_req_dma_map;
+	}
+
+	device = isert_device_find_by_ib_dev(cma_id);
+	if (IS_ERR(device)) {
+		ret = PTR_ERR(device);
+		goto out_rsp_dma_map;
+	}
+
+	isert_conn->conn_device = device;
+	isert_conn->conn_pd = device->dev_pd;
+	isert_conn->conn_mr = device->dev_mr;
+
+	ret = isert_conn_setup_qp(isert_conn, cma_id);
+	if (ret)
+		goto out_conn_dev;
+
+	mutex_lock(&isert_np->np_accept_mutex);
+	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
+	mutex_unlock(&isert_np->np_accept_mutex);
+
+	pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np);
+	wake_up(&isert_np->np_accept_wq);
+	return 0;
+
+out_conn_dev:
+	isert_device_try_release(device);
+out_rsp_dma_map:
+	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+out_req_dma_map:
+	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
+out_login_buf:
+	kfree(isert_conn->login_buf);
+out:
+	kfree(isert_conn);
+	return ret;
+}
+
+static void
+isert_connect_release(struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_device *device = isert_conn->conn_device;
+	int cq_index;
+
+	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+
+	if (isert_conn->conn_qp) {
+		cq_index = ((struct isert_cq_desc *)
+			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
+		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
+		isert_conn->conn_device->cq_active_qps[cq_index]--;
+
+		rdma_destroy_qp(isert_conn->conn_cm_id);
+	}
+
+	isert_free_rx_descriptors(isert_conn);
+	rdma_destroy_id(isert_conn->conn_cm_id);
+
+	if (isert_conn->login_buf) {
+		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
+				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
+		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
+				    ISCSI_DEF_MAX_RECV_SEG_LEN,
+				    DMA_FROM_DEVICE);
+		kfree(isert_conn->login_buf);
+	}
+	kfree(isert_conn);
+
+	if (device)
+		isert_device_try_release(device);
+
+	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
+}
+
+static void
+isert_connected_handler(struct rdma_cm_id *cma_id)
+{
+	return;
+}
+
+static void
+isert_release_conn_kref(struct kref *kref)
+{
+	struct isert_conn *isert_conn = container_of(kref,
+				struct isert_conn, conn_kref);
+
+	pr_debug("Calling isert_connect_release for final kref %s/%d\n",
+		 current->comm, current->pid);
+
+	isert_connect_release(isert_conn);
+}
+
+static void
+isert_put_conn(struct isert_conn *isert_conn)
+{
+	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
+}
+
+static void
+isert_disconnect_work(struct work_struct *work)
+{
+	struct isert_conn *isert_conn = container_of(work,
+				struct isert_conn, conn_logout_work);
+
+	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+
+	isert_conn->state = ISER_CONN_DOWN;
+
+	if (isert_conn->post_recv_buf_count == 0 &&
+	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
+		pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
+		wake_up(&isert_conn->conn_wait);
+	}
+
+	isert_put_conn(isert_conn);
+}
+
+static void
+isert_disconnected_handler(struct rdma_cm_id *cma_id)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+
+	INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
+	schedule_work(&isert_conn->conn_logout_work);
+}
+
+static int
+isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+{
+	int ret = 0;
+
+	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
+		 event->event, event->status, cma_id->context, cma_id);
+
+	switch (event->event) {
+	case RDMA_CM_EVENT_CONNECT_REQUEST:
+		pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
+		ret = isert_connect_request(cma_id, event);
+		break;
+	case RDMA_CM_EVENT_ESTABLISHED:
+		pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
+		isert_connected_handler(cma_id);
+		break;
+	case RDMA_CM_EVENT_DISCONNECTED:
+		pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
+		isert_disconnected_handler(cma_id);
+		break;
+	case RDMA_CM_EVENT_DEVICE_REMOVAL:
+	case RDMA_CM_EVENT_ADDR_CHANGE:
+		break;
+	case RDMA_CM_EVENT_CONNECT_ERROR:
+	default:
+		pr_err("Unknown RDMA CMA event: %d\n", event->event);
+		break;
+	}
+
+	if (ret != 0) {
+		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
+		       event->event, ret);
+		dump_stack();
+	}
+
+	return ret;
+}
+
+static int
+isert_post_recv(struct isert_conn *isert_conn, u32 count)
+{
+	struct ib_recv_wr *rx_wr, *rx_wr_failed;
+	int i, ret;
+	unsigned int rx_head = isert_conn->conn_rx_desc_head;
+	struct iser_rx_desc *rx_desc;
+
+	for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
+		rx_desc		= &isert_conn->conn_rx_descs[rx_head];
+		rx_wr->wr_id	= (unsigned long)rx_desc;
+		rx_wr->sg_list	= &rx_desc->rx_sg;
+		rx_wr->num_sge	= 1;
+		rx_wr->next	= rx_wr + 1;
+		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
+	}
+
+	rx_wr--;
+	rx_wr->next = NULL; /* mark end of work requests list */
+
+	isert_conn->post_recv_buf_count += count;
+	ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
+				&rx_wr_failed);
+	if (ret) {
+		pr_err("ib_post_recv() failed with ret: %d\n", ret);
+		isert_conn->post_recv_buf_count -= count;
+	} else {
+		pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+		isert_conn->conn_rx_desc_head = rx_head;
+	}
+	return ret;
+}
+
+static int
+isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_send_wr send_wr, *send_wr_failed;
+	int ret;
+
+	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
+				      ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+	send_wr.next	= NULL;
+	send_wr.wr_id	= (unsigned long)tx_desc;
+	send_wr.sg_list	= tx_desc->tx_sg;
+	send_wr.num_sge	= tx_desc->num_sge;
+	send_wr.opcode	= IB_WR_SEND;
+	send_wr.send_flags = IB_SEND_SIGNALED;
+
+	atomic_inc(&isert_conn->post_send_buf_count);
+
+	ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
+	if (ret) {
+		pr_err("ib_post_send() failed, ret: %d\n", ret);
+		atomic_dec(&isert_conn->post_send_buf_count);
+	}
+
+	return ret;
+}
+
+static void
+isert_create_send_desc(struct isert_conn *isert_conn,
+		       struct isert_cmd *isert_cmd,
+		       struct iser_tx_desc *tx_desc)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
+				   ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
+	tx_desc->iser_header.flags = ISER_VER;
+
+	tx_desc->num_sge = 1;
+	tx_desc->isert_cmd = isert_cmd;
+
+	if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
+		tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+		pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+	}
+}
+
+static int
+isert_init_tx_hdrs(struct isert_conn *isert_conn,
+		   struct iser_tx_desc *tx_desc)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	u64 dma_addr;
+
+	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
+			ISER_HEADERS_LEN, DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
+		pr_err("ib_dma_mapping_error() failed\n");
+		return -ENOMEM;
+	}
+
+	tx_desc->dma_addr = dma_addr;
+	tx_desc->tx_sg[0].addr	= tx_desc->dma_addr;
+	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+	tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
+
+	pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
+		 " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
+		 tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
+
+	return 0;
+}
+
+static void
+isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
+{
+	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
+	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+	send_wr->opcode = IB_WR_SEND;
+	send_wr->send_flags = IB_SEND_SIGNALED;
+	send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
+	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
+}
+
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn)
+{
+	struct ib_recv_wr rx_wr, *rx_wr_fail;
+	struct ib_sge sge;
+	int ret;
+
+	memset(&sge, 0, sizeof(struct ib_sge));
+	sge.addr = isert_conn->login_req_dma;
+	sge.length = ISER_RX_LOGIN_SIZE;
+	sge.lkey = isert_conn->conn_mr->lkey;
+
+	pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
+		sge.addr, sge.length, sge.lkey);
+
+	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
+	rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
+	rx_wr.sg_list = &sge;
+	rx_wr.num_sge = 1;
+
+	isert_conn->post_recv_buf_count++;
+	ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
+	if (ret) {
+		pr_err("ib_post_recv() failed: %d\n", ret);
+		isert_conn->post_recv_buf_count--;
+	}
+
+	pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
+	return ret;
+}
+
+static int
+isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+		   u32 length)
+{
+	struct isert_conn *isert_conn = conn->context;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
+	int ret;
+
+	isert_create_send_desc(isert_conn, NULL, tx_desc);
+
+	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
+	       sizeof(struct iscsi_hdr));
+
+	isert_init_tx_hdrs(isert_conn, tx_desc);
+
+	if (length > 0) {
+		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
+
+		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
+					   length, DMA_TO_DEVICE);
+
+		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
+
+		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
+					      length, DMA_TO_DEVICE);
+
+		tx_dsg->addr	= isert_conn->login_rsp_dma;
+		tx_dsg->length	= length;
+		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
+		tx_desc->num_sge = 2;
+	}
+	if (!login->login_failed) {
+		if (login->login_complete) {
+			ret = isert_alloc_rx_descriptors(isert_conn);
+			if (ret)
+				return ret;
+
+			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
+			if (ret)
+				return ret;
+
+			isert_conn->state = ISER_CONN_UP;
+			goto post_send;
+		}
+
+		ret = isert_rdma_post_recvl(isert_conn);
+		if (ret)
+			return ret;
+	}
+post_send:
+	ret = isert_post_send(isert_conn, tx_desc);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void
+isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
+		   struct isert_conn *isert_conn)
+{
+	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsi_login *login = conn->conn_login;
+	int size;
+
+	if (!login) {
+		pr_err("conn->conn_login is NULL\n");
+		dump_stack();
+		return;
+	}
+
+	if (login->first_request) {
+		struct iscsi_login_req *login_req =
+			(struct iscsi_login_req *)&rx_desc->iscsi_header;
+		/*
+		 * Setup the initial iscsi_login values from the leading
+		 * login request PDU.
+		 */
+		login->leading_connection = (!login_req->tsih) ? 1 : 0;
+		login->current_stage =
+			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
+			 >> 2;
+		login->version_min	= login_req->min_version;
+		login->version_max	= login_req->max_version;
+		memcpy(login->isid, login_req->isid, 6);
+		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
+		login->init_task_tag	= login_req->itt;
+		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+		login->cid		= be16_to_cpu(login_req->cid);
+		login->tsih		= be16_to_cpu(login_req->tsih);
+	}
+
+	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
+
+	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
+	pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
+		 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
+	memcpy(login->req_buf, &rx_desc->data[0], size);
+
+	complete(&isert_conn->conn_login_comp);
+}
+
+static void
+isert_release_cmd(struct iscsi_cmd *cmd)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
+						   iscsi_cmd);
+
+	pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);
+
+	kfree(cmd->buf_ptr);
+	kfree(cmd->tmr_req);
+
+	kmem_cache_free(isert_cmd_cache, isert_cmd);
+}
+
+static struct iscsi_cmd
+*isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct isert_cmd *isert_cmd;
+
+	isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
+	if (!isert_cmd) {
+		pr_err("Unable to allocate isert_cmd\n");
+		return NULL;
+	}
+	isert_cmd->conn = isert_conn;
+	isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;
+
+	return &isert_cmd->iscsi_cmd;
+}
+
+static int
+isert_handle_scsi_cmd(struct isert_conn *isert_conn,
+		      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
+		      unsigned char *buf)
+{
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
+	struct scatterlist *sg;
+	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
+	bool dump_payload = false;
+
+	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
+	if (rc < 0)
+		return rc;
+
+	imm_data = cmd->immediate_data;
+	imm_data_len = cmd->first_burst_len;
+	unsol_data = cmd->unsolicited_data;
+
+	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+	if (rc < 0) {
+		return 0;
+	} else if (rc > 0) {
+		dump_payload = true;
+		goto sequence_cmd;
+	}
+
+	if (!imm_data)
+		return 0;
+
+	sg = &cmd->se_cmd.t_data_sg[0];
+	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
+
+	pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
+		 sg, sg_nents, &rx_desc->data[0], imm_data_len);
+
+	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
+
+	cmd->write_data_done += imm_data_len;
+
+	if (cmd->write_data_done == cmd->se_cmd.data_length) {
+		spin_lock_bh(&cmd->istate_lock);
+		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+		spin_unlock_bh(&cmd->istate_lock);
+	}
+
+sequence_cmd:
+	rc = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
+
+	if (!rc && dump_payload == false && unsol_data)
+		iscsit_set_unsoliticed_dataout(cmd);
+
+	if (rc == CMDSN_ERROR_CANNOT_RECOVER)
+		return iscsit_add_reject_from_cmd(
+			   ISCSI_REASON_PROTOCOL_ERROR,
+			   1, 0, (unsigned char *)hdr, cmd);
+
+	return 0;
+}
+
+static int
+isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
+			   struct iser_rx_desc *rx_desc, unsigned char *buf)
+{
+	struct scatterlist *sg_start;
+	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_data *hdr = (struct iscsi_data *)buf;
+	u32 unsol_data_len = ntoh24(hdr->dlength);
+	int rc, sg_nents, sg_off, page_off;
+
+	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
+	if (rc < 0)
+		return rc;
+	else if (!cmd)
+		return 0;
+	/*
+	 * FIXME: Unexpected unsolicited_data out
+	 */
+	if (!cmd->unsolicited_data) {
+		pr_err("Received unexpected solicited data payload\n");
+		dump_stack();
+		return -1;
+	}
+
+	pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
+		 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
+
+	sg_off = cmd->write_data_done / PAGE_SIZE;
+	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
+	page_off = cmd->write_data_done % PAGE_SIZE;
+	/*
+	 * FIXME: Non page-aligned unsolicited_data out
+	 */
+	if (page_off) {
+		pr_err("Received unexpected non-page aligned data payload\n");
+		dump_stack();
+		return -1;
+	}
+	pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
+		 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
+
+	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
+			    unsol_data_len);
+
+	rc = iscsit_check_dataout_payload(cmd, hdr, false);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+static int
+isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
+		uint32_t read_stag, uint64_t read_va,
+		uint32_t write_stag, uint64_t write_va)
+{
+	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
+	struct iscsi_conn *conn = isert_conn->conn;
+	struct iscsi_cmd *cmd;
+	struct isert_cmd *isert_cmd;
+	int ret = -EINVAL;
+	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
+
+	switch (opcode) {
+	case ISCSI_OP_SCSI_CMD:
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			break;
+
+		isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
+		isert_cmd->read_stag = read_stag;
+		isert_cmd->read_va = read_va;
+		isert_cmd->write_stag = write_stag;
+		isert_cmd->write_va = write_va;
+
+		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
+					rx_desc, (unsigned char *)hdr);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			break;
+
+		ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
+		break;
+	case ISCSI_OP_SCSI_DATA_OUT:
+		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
+						(unsigned char *)hdr);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			break;
+
+		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
+						(unsigned char *)hdr);
+		break;
+	case ISCSI_OP_LOGOUT:
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			break;
+
+		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
+		if (ret > 0)
+			wait_for_completion_timeout(&conn->conn_logout_comp,
+						    SECONDS_FOR_LOGOUT_COMP *
+						    HZ);
+		break;
+	default:
+		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+		dump_stack();
+		break;
+	}
+
+	return ret;
+}
+
+static void
+isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
+{
+	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
+	uint64_t read_va = 0, write_va = 0;
+	uint32_t read_stag = 0, write_stag = 0;
+	int rc;
+
+	switch (iser_hdr->flags & 0xF0) {
+	case ISCSI_CTRL:
+		if (iser_hdr->flags & ISER_RSV) {
+			read_stag = be32_to_cpu(iser_hdr->read_stag);
+			read_va = be64_to_cpu(iser_hdr->read_va);
+			pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
+				 read_stag, (unsigned long long)read_va);
+		}
+		if (iser_hdr->flags & ISER_WSV) {
+			write_stag = be32_to_cpu(iser_hdr->write_stag);
+			write_va = be64_to_cpu(iser_hdr->write_va);
+			pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
+				 write_stag, (unsigned long long)write_va);
+		}
+
+		pr_debug("ISER ISCSI_CTRL PDU\n");
+		break;
+	case ISER_HELLO:
+		pr_err("iSER Hello message\n");
+		break;
+	default:
+		pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+		break;
+	}
+
+	rc = isert_rx_opcode(isert_conn, rx_desc,
+			     read_stag, read_va, write_stag, write_va);
+}
+
+static void
+isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
+		    unsigned long xfer_len)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iscsi_hdr *hdr;
+	u64 rx_dma;
+	int rx_buflen, outstanding;
+
+	if ((char *)desc == isert_conn->login_req_buf) {
+		rx_dma = isert_conn->login_req_dma;
+		rx_buflen = ISER_RX_LOGIN_SIZE;
+		pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+			 rx_dma, rx_buflen);
+	} else {
+		rx_dma = desc->dma_addr;
+		rx_buflen = ISER_RX_PAYLOAD_SIZE;
+		pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+			 rx_dma, rx_buflen);
+	}
+
+	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
+
+	hdr = &desc->iscsi_header;
+	pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+		 hdr->opcode, hdr->itt, hdr->flags,
+		 (int)(xfer_len - ISER_HEADERS_LEN));
+
+	if ((char *)desc == isert_conn->login_req_buf)
+		isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
+				   isert_conn);
+	else
+		isert_rx_do_work(desc, isert_conn);
+
+	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
+				      DMA_FROM_DEVICE);
+
+	isert_conn->post_recv_buf_count--;
+	pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
+		 isert_conn->post_recv_buf_count);
+
+	if ((char *)desc == isert_conn->login_req_buf)
+		return;
+
+	outstanding = isert_conn->post_recv_buf_count;
+	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
+		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
+				ISERT_MIN_POSTED_RX);
+		err = isert_post_recv(isert_conn, count);
+		if (err) {
+			pr_err("isert_post_recv() count: %d failed, %d\n",
+			       count, err);
+		}
+	}
+}
+
+static void
+isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
+
+	if (wr->sge) {
+		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
+		wr->sge = NULL;
+	}
+
+	kfree(wr->send_wr);
+	wr->send_wr = NULL;
+
+	kfree(isert_cmd->ib_sge);
+	isert_cmd->ib_sge = NULL;
+}
+
+static void
+isert_put_cmd(struct isert_cmd *isert_cmd)
+{
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct iscsi_conn *conn;
+
+	pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
+
+	switch (cmd->iscsi_opcode) {
+	case ISCSI_OP_SCSI_CMD:
+		conn = isert_conn->conn;
+
+		spin_lock_bh(&conn->cmd_lock);
+		if (!list_empty(&cmd->i_conn_node))
+			list_del(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		if (cmd->data_direction == DMA_TO_DEVICE)
+			iscsit_stop_dataout_timer(cmd);
+
+		isert_unmap_cmd(isert_cmd, isert_conn);
+		/*
+		 * Fall-through
+		 */
+	case ISCSI_OP_SCSI_TMFUNC:
+		transport_generic_free_cmd(&cmd->se_cmd, 0);
+		break;
+	case ISCSI_OP_REJECT:
+	case ISCSI_OP_NOOP_OUT:
+		conn = isert_conn->conn;
+
+		spin_lock_bh(&conn->cmd_lock);
+		if (!list_empty(&cmd->i_conn_node))
+			list_del(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		/*
+		 * Handle special case for REJECT when iscsi_add_reject*() has
+		 * overwritten the original iscsi_opcode assignment, and the
+		 * associated cmd->se_cmd needs to be released.
+		 */
+		if (cmd->se_cmd.se_tfo != NULL) {
+			transport_generic_free_cmd(&cmd->se_cmd, 0);
+			break;
+		}
+		/*
+		 * Fall-through
+		 */
+	default:
+		isert_release_cmd(cmd);
+		break;
+	}
+}
+
+static void
+isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
+{
+	if (tx_desc->dma_addr != 0) {
+		pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
+		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
+				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
+		tx_desc->dma_addr = 0;
+	}
+}
+
+static void
+isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
+		     struct ib_device *ib_dev)
+{
+	if (isert_cmd->sense_buf_dma != 0) {
+		pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
+		ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
+				    isert_cmd->sense_buf_len, DMA_TO_DEVICE);
+		isert_cmd->sense_buf_dma = 0;
+	}
+
+	isert_unmap_tx_desc(tx_desc, ib_dev);
+	isert_put_cmd(isert_cmd);
+}
+
+static void
+isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+			   struct isert_cmd *isert_cmd)
+{
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;
+
+	iscsit_stop_dataout_timer(cmd);
+
+	if (wr->sge) {
+		pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
+		ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
+		wr->sge = NULL;
+	}
+
+	if (isert_cmd->ib_sge) {
+		pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
+		kfree(isert_cmd->ib_sge);
+		isert_cmd->ib_sge = NULL;
+	}
+
+	cmd->write_data_done = se_cmd->data_length;
+
+	pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
+	spin_lock_bh(&cmd->istate_lock);
+	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+	spin_unlock_bh(&cmd->istate_lock);
+
+	target_execute_cmd(se_cmd);
+}
+
+static void
+isert_do_control_comp(struct work_struct *work)
+{
+	struct isert_cmd *isert_cmd = container_of(work,
+			struct isert_cmd, comp_work);
+	struct isert_conn *isert_conn = isert_cmd->conn;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+
+	switch (cmd->i_state) {
+	case ISTATE_SEND_TASKMGTRSP:
+		pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
+
+		atomic_dec(&isert_conn->post_send_buf_count);
+		iscsit_tmr_post_handler(cmd, cmd->conn);
+
+		cmd->i_state = ISTATE_SENT_STATUS;
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		break;
+	case ISTATE_SEND_REJECT:
+		pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
+		atomic_dec(&isert_conn->post_send_buf_count);
+
+		cmd->i_state = ISTATE_SENT_STATUS;
+		complete(&cmd->reject_comp);
+		isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+		break;
+	case ISTATE_SEND_LOGOUTRSP:
+		pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
+		/*
+		 * Call atomic_dec(&isert_conn->post_send_buf_count)
+		 * from isert_free_conn()
+		 */
+		isert_conn->logout_posted = true;
+		iscsit_logout_post_handler(cmd, cmd->conn);
+		break;
+	default:
+		pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+		dump_stack();
+		break;
+	}
+}
+
+static void
+isert_response_completion(struct iser_tx_desc *tx_desc,
+			  struct isert_cmd *isert_cmd,
+			  struct isert_conn *isert_conn,
+			  struct ib_device *ib_dev)
+{
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+
+	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
+	    cmd->i_state == ISTATE_SEND_LOGOUTRSP) {
+		isert_unmap_tx_desc(tx_desc, ib_dev);
+
+		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
+		queue_work(isert_comp_wq, &isert_cmd->comp_work);
+		return;
+	}
+	atomic_dec(&isert_conn->post_send_buf_count);
+
+	cmd->i_state = ISTATE_SENT_STATUS;
+	isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
+
+static void
+isert_send_completion(struct iser_tx_desc *tx_desc,
+		      struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+	struct isert_rdma_wr *wr;
+
+	if (!isert_cmd) {
+		atomic_dec(&isert_conn->post_send_buf_count);
+		isert_unmap_tx_desc(tx_desc, ib_dev);
+		return;
+	}
+	wr = &isert_cmd->rdma_wr;
+
+	switch (wr->iser_ib_op) {
+	case ISER_IB_RECV:
+		pr_err("isert_send_completion: Got ISER_IB_RECV\n");
+		dump_stack();
+		break;
+	case ISER_IB_SEND:
+		pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
+		isert_response_completion(tx_desc, isert_cmd,
+					  isert_conn, ib_dev);
+		break;
+	case ISER_IB_RDMA_WRITE:
+		pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
+		dump_stack();
+		break;
+	case ISER_IB_RDMA_READ:
+		pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
+
+		atomic_dec(&isert_conn->post_send_buf_count);
+		isert_completion_rdma_read(tx_desc, isert_cmd);
+		break;
+	default:
+		pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
+		dump_stack();
+		break;
+	}
+}
+
+static void
+isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+{
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+	if (tx_desc) {
+		struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+
+		if (!isert_cmd)
+			isert_unmap_tx_desc(tx_desc, ib_dev);
+		else
+			isert_completion_put(tx_desc, isert_cmd, ib_dev);
+	}
+
+	if (isert_conn->post_recv_buf_count == 0 &&
+	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
+		pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+		pr_debug("Calling wake_up from isert_cq_comp_err\n");
+
+		isert_conn->state = ISER_CONN_TERMINATING;
+		wake_up(&isert_conn->conn_wait_comp_err);
+	}
+}
+
+static void
+isert_cq_tx_work(struct work_struct *work)
+{
+	struct isert_cq_desc *cq_desc = container_of(work,
+				struct isert_cq_desc, cq_tx_work);
+	struct isert_device *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
+	struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
+	struct isert_conn *isert_conn;
+	struct iser_tx_desc *tx_desc;
+	struct ib_wc wc;
+
+	while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
+		tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
+		isert_conn = wc.qp->qp_context;
+
+		if (wc.status == IB_WC_SUCCESS) {
+			isert_send_completion(tx_desc, isert_conn);
+		} else {
+			pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
+			pr_debug("TX wc.status: 0x%08x\n", wc.status);
+			atomic_dec(&isert_conn->post_send_buf_count);
+			isert_cq_comp_err(tx_desc, isert_conn);
+		}
+	}
+
+	ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
+}
+
+static void
+isert_cq_tx_callback(struct ib_cq *cq, void *context)
+{
+	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+
+	INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
+	queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
+}
+
+static void
+isert_cq_rx_work(struct work_struct *work)
+{
+	struct isert_cq_desc *cq_desc = container_of(work,
+			struct isert_cq_desc, cq_rx_work);
+	struct isert_device *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
+	struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
+	struct isert_conn *isert_conn;
+	struct iser_rx_desc *rx_desc;
+	struct ib_wc wc;
+	unsigned long xfer_len;
+
+	while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
+		rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
+		isert_conn = wc.qp->qp_context;
+
+		if (wc.status == IB_WC_SUCCESS) {
+			xfer_len = (unsigned long)wc.byte_len;
+			isert_rx_completion(rx_desc, isert_conn, xfer_len);
+		} else {
+			pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
+			if (wc.status != IB_WC_WR_FLUSH_ERR)
+				pr_debug("RX wc.status: 0x%08x\n", wc.status);
+
+			isert_conn->post_recv_buf_count--;
+			isert_cq_comp_err(NULL, isert_conn);
+		}
+	}
+
+	ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
+}
+
+static void
+isert_cq_rx_callback(struct ib_cq *cq, void *context)
+{
+	struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+
+	INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
+	queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+}
+
+static int
+isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
+{
+	struct ib_send_wr *wr_failed;
+	int ret;
+
+	atomic_inc(&isert_conn->post_send_buf_count);
+
+	ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
+			   &wr_failed);
+	if (ret) {
+		pr_err("ib_post_send failed with %d\n", ret);
+		atomic_dec(&isert_conn->post_send_buf_count);
+		return ret;
+	}
+	return ret;
+}
+
+static int
+isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+					struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
+				&isert_cmd->tx_desc.iscsi_header;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	/*
+	 * Attach SENSE DATA payload to iSCSI Response PDU
+	 */
+	if (cmd->se_cmd.sense_buffer &&
+	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
+	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
+		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
+		u32 padding, sense_len;
+
+		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
+				   cmd->sense_buffer);
+		cmd->se_cmd.scsi_sense_length += sizeof(__be16);
+
+		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
+		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
+		sense_len = cmd->se_cmd.scsi_sense_length + padding;
+
+		isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
+				(void *)cmd->sense_buffer, sense_len,
+				DMA_TO_DEVICE);
+
+		isert_cmd->sense_buf_len = sense_len;
+		tx_dsg->addr	= isert_cmd->sense_buf_dma;
+		tx_dsg->length	= sense_len;
+		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
+		isert_cmd->tx_desc.num_sge = 2;
+	}
+
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+		bool nopout_response)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
+			       &isert_cmd->tx_desc.iscsi_header,
+			       nopout_response);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting NOPIN Reponse IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
+				&isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
+				  &isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct isert_cmd *isert_cmd = container_of(cmd,
+				struct isert_cmd, iscsi_cmd);
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
+
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_reject(cmd, conn, (struct iscsi_reject *)
+				&isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, send_wr);
+
+	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+
+	return isert_post_response(isert_conn, isert_cmd);
+}
+
+static int
+isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
+		    u32 data_left, u32 offset)
+{
+	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
+	struct scatterlist *sg_start, *tmp_sg;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	u32 sg_off, page_off;
+	int i = 0, sg_nents;
+
+	sg_off = offset / PAGE_SIZE;
+	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
+	page_off = offset % PAGE_SIZE;
+
+	send_wr->sg_list = ib_sge;
+	send_wr->num_sge = sg_nents;
+	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+	/*
+	 * Map the TCM scatterlist memory into ib_sge dma_addr entries.
+	 */
+	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
+		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
+			 (unsigned long long)tmp_sg->dma_address,
+			 tmp_sg->length, page_off);
+
+		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
+		ib_sge->length = min_t(u32, data_left,
+				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
+		ib_sge->lkey = isert_conn->conn_mr->lkey;
+
+		pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u\n",
+			 ib_sge->addr, ib_sge->length);
+		page_off = 0;
+		data_left -= ib_sge->length;
+		ib_sge++;
+		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
+	}
+
+	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
+		 send_wr->sg_list, send_wr->num_sge);
+
+	return sg_nents;
+}
+
+static int
+isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_cmd *isert_cmd = container_of(cmd,
+					struct isert_cmd, iscsi_cmd);
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *wr_failed, *send_wr;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct ib_sge *ib_sge;
+	struct scatterlist *sg;
+	u32 offset = 0, data_len, data_left, rdma_write_max;
+	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;
+
+	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);
+
+	sg = &se_cmd->t_data_sg[0];
+	sg_nents = se_cmd->t_data_nents;
+
+	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
+	if (unlikely(!count)) {
+		pr_err("Unable to map put_datain SGs\n");
+		return -EINVAL;
+	}
+	wr->sge = sg;
+	wr->num_sge = sg_nents;
+	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
+		 count, sg, sg_nents);
+
+	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+	if (!ib_sge) {
+		pr_warn("Unable to allocate datain ib_sge\n");
+		ret = -ENOMEM;
+		goto unmap_sg;
+	}
+	isert_cmd->ib_sge = ib_sge;
+
+	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
+		 ib_sge, se_cmd->t_data_nents);
+
+	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
+				GFP_KERNEL);
+	if (!wr->send_wr) {
+		pr_err("Unable to allocate wr->send_wr\n");
+		ret = -ENOMEM;
+		goto unmap_sg;
+	}
+	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
+		 wr->send_wr, wr->send_wr_num);
+
+	iscsit_increment_maxcmdsn(cmd, conn->sess);
+	cmd->stat_sn = conn->stat_sn++;
+
+	wr->isert_cmd = isert_cmd;
+	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
+	data_left = se_cmd->data_length;
+
+	for (i = 0; i < wr->send_wr_num; i++) {
+		send_wr = &isert_cmd->rdma_wr.send_wr[i];
+		data_len = min(data_left, rdma_write_max);
+
+		send_wr->opcode = IB_WR_RDMA_WRITE;
+		send_wr->send_flags = 0;
+		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
+		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
+
+		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
+					send_wr, data_len, offset);
+		ib_sge += ib_sge_cnt;
+
+		if (i + 1 == wr->send_wr_num)
+			send_wr->next = &isert_cmd->tx_desc.send_wr;
+		else
+			send_wr->next = &wr->send_wr[i + 1];
+
+		offset += data_len;
+		data_left -= data_len;
+	}
+	/*
+	 * Build isert_cmd->tx_desc for the iSCSI response PDU and chain it
+	 * after the final RDMA_WRITE work request.
+	 */
+	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
+			     &isert_cmd->tx_desc.iscsi_header);
+	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
+
+	atomic_inc(&isert_conn->post_send_buf_count);
+
+	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+	if (rc) {
+		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
+		atomic_dec(&isert_conn->post_send_buf_count);
+	}
+	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
+	return 1;
+
+unmap_sg:
+	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
+	return ret;
+}
+
+static int
+isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+{
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct isert_cmd *isert_cmd = container_of(cmd,
+					struct isert_cmd, iscsi_cmd);
+	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	struct ib_send_wr *wr_failed, *send_wr;
+	struct ib_sge *ib_sge;
+	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+	struct scatterlist *sg_start;
+	u32 sg_off, sg_nents, page_off, va_offset = 0;
+	u32 offset = 0, data_len, data_left, rdma_write_max;
+	int rc, ret = 0, count, i, ib_sge_cnt;
+
+	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
+		 se_cmd->data_length, cmd->write_data_done);
+
+	sg_off = cmd->write_data_done / PAGE_SIZE;
+	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+	page_off = cmd->write_data_done % PAGE_SIZE;
+
+	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
+		 sg_off, sg_start, page_off);
+
+	data_left = se_cmd->data_length - cmd->write_data_done;
+	sg_nents = se_cmd->t_data_nents - sg_off;
+
+	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
+		 data_left, sg_nents);
+
+	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
+	if (unlikely(!count)) {
+		pr_err("Unable to map get_dataout SGs\n");
+		return -EINVAL;
+	}
+	wr->sge = sg_start;
+	wr->num_sge = sg_nents;
+	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
+		 count, sg_start, sg_nents);
+
+	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+	if (!ib_sge) {
+		pr_warn("Unable to allocate dataout ib_sge\n");
+		ret = -ENOMEM;
+		goto unmap_sg;
+	}
+	isert_cmd->ib_sge = ib_sge;
+
+	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
+		 ib_sge, sg_nents);
+
+	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
+				GFP_KERNEL);
+	if (!wr->send_wr) {
+		pr_debug("Unable to allocate wr->send_wr\n");
+		ret = -ENOMEM;
+		goto unmap_sg;
+	}
+	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
+		 wr->send_wr, wr->send_wr_num);
+
+	isert_cmd->tx_desc.isert_cmd = isert_cmd;
+
+	wr->iser_ib_op = ISER_IB_RDMA_READ;
+	wr->isert_cmd = isert_cmd;
+	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
+	offset = cmd->write_data_done;
+
+	for (i = 0; i < wr->send_wr_num; i++) {
+		send_wr = &isert_cmd->rdma_wr.send_wr[i];
+		data_len = min(data_left, rdma_write_max);
+
+		send_wr->opcode = IB_WR_RDMA_READ;
+		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
+		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
+
+		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
+					send_wr, data_len, offset);
+		ib_sge += ib_sge_cnt;
+
+		if (i + 1 == wr->send_wr_num)
+			send_wr->send_flags = IB_SEND_SIGNALED;
+		else
+			send_wr->next = &wr->send_wr[i + 1];
+
+		offset += data_len;
+		va_offset += data_len;
+		data_left -= data_len;
+	}
+
+	atomic_inc(&isert_conn->post_send_buf_count);
+
+	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
+	if (rc) {
+		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+		atomic_dec(&isert_conn->post_send_buf_count);
+	}
+	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
+	return 0;
+
+unmap_sg:
+	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
+	return ret;
+}
+
+static int
+isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+{
+	int ret;
+
+	switch (state) {
+	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+		ret = isert_put_nopin(cmd, conn, false);
+		break;
+	default:
+		pr_err("Unknown immediate state: 0x%02x\n", state);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+{
+	int ret;
+
+	switch (state) {
+	case ISTATE_SEND_LOGOUTRSP:
+		ret = isert_put_logout_rsp(cmd, conn);
+		if (!ret) {
+			pr_debug("Returning iSER Logout -EAGAIN\n");
+			ret = -EAGAIN;
+		}
+		break;
+	case ISTATE_SEND_NOPIN:
+		ret = isert_put_nopin(cmd, conn, true);
+		break;
+	case ISTATE_SEND_TASKMGTRSP:
+		ret = isert_put_tm_rsp(cmd, conn);
+		break;
+	case ISTATE_SEND_REJECT:
+		ret = isert_put_reject(cmd, conn);
+		break;
+	case ISTATE_SEND_STATUS:
+		/*
+		 * Special case for sending non-GOOD SCSI status from TX thread
+		 * context during pre se_cmd execution failure.
+		 */
+		ret = isert_put_response(conn, cmd);
+		break;
+	default:
+		pr_err("Unknown response state: 0x%02x\n", state);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int
+isert_setup_np(struct iscsi_np *np,
+	       struct __kernel_sockaddr_storage *ksockaddr)
+{
+	struct isert_np *isert_np;
+	struct rdma_cm_id *isert_lid;
+	struct sockaddr *sa;
+	int ret;
+
+	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
+	if (!isert_np) {
+		pr_err("Unable to allocate struct isert_np\n");
+		return -ENOMEM;
+	}
+	init_waitqueue_head(&isert_np->np_accept_wq);
+	mutex_init(&isert_np->np_accept_mutex);
+	INIT_LIST_HEAD(&isert_np->np_accept_list);
+	init_completion(&isert_np->np_login_comp);
+
+	sa = (struct sockaddr *)ksockaddr;
+	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
+	/*
+	 * Set up np->np_sockaddr from the sockaddr passed in by the
+	 * iscsi_target_configfs.c code.
+	 */
+	memcpy(&np->np_sockaddr, ksockaddr,
+	       sizeof(struct __kernel_sockaddr_storage));
+
+	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
+				IB_QPT_RC);
+	if (IS_ERR(isert_lid)) {
+		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
+		       PTR_ERR(isert_lid));
+		ret = PTR_ERR(isert_lid);
+		goto out;
+	}
+
+	ret = rdma_bind_addr(isert_lid, sa);
+	if (ret) {
+		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
+		goto out_lid;
+	}
+
+	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
+	if (ret) {
+		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
+		goto out_lid;
+	}
+
+	isert_np->np_cm_id = isert_lid;
+	np->np_context = isert_np;
+	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
+
+	return 0;
+
+out_lid:
+	rdma_destroy_id(isert_lid);
+out:
+	kfree(isert_np);
+	return ret;
+}
+
+static int
+isert_check_accept_queue(struct isert_np *isert_np)
+{
+	int empty;
+
+	mutex_lock(&isert_np->np_accept_mutex);
+	empty = list_empty(&isert_np->np_accept_list);
+	mutex_unlock(&isert_np->np_accept_mutex);
+
+	return empty;
+}
+
+static int
+isert_rdma_accept(struct isert_conn *isert_conn)
+{
+	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+	struct rdma_conn_param cp;
+	int ret;
+
+	memset(&cp, 0, sizeof(struct rdma_conn_param));
+	cp.responder_resources = isert_conn->responder_resources;
+	cp.initiator_depth = isert_conn->initiator_depth;
+	cp.retry_count = 7;
+	cp.rnr_retry_count = 7;
+
+	pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
+
+	ret = rdma_accept(cm_id, &cp);
+	if (ret) {
+		pr_err("rdma_accept() failed with: %d\n", ret);
+		return ret;
+	}
+
+	pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
+
+	return 0;
+}
+
+static int
+isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+	int ret;
+
+	pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+
+	ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
+	if (ret)
+		return ret;
+
+	pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+	return 0;
+}
+
+static void
+isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+		    struct isert_conn *isert_conn)
+{
+	struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
+	struct rdma_route *cm_route = &cm_id->route;
+	struct sockaddr_in *sock_in;
+	struct sockaddr_in6 *sock_in6;
+
+	conn->login_family = np->np_sockaddr.ss_family;
+
+	if (np->np_sockaddr.ss_family == AF_INET6) {
+		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
+		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+			 &sock_in6->sin6_addr.in6_u);
+		conn->login_port = ntohs(sock_in6->sin6_port);
+
+		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
+		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
+			 &sock_in6->sin6_addr.in6_u);
+		conn->local_port = ntohs(sock_in6->sin6_port);
+	} else {
+		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
+		sprintf(conn->login_ip, "%pI4",
+			&sock_in->sin_addr.s_addr);
+		conn->login_port = ntohs(sock_in->sin_port);
+
+		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
+		sprintf(conn->local_ip, "%pI4",
+			&sock_in->sin_addr.s_addr);
+		conn->local_port = ntohs(sock_in->sin_port);
+	}
+}
+
+static int
+isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+	struct isert_np *isert_np = (struct isert_np *)np->np_context;
+	struct isert_conn *isert_conn;
+	int max_accept = 0, ret;
+
+accept_wait:
+	ret = wait_event_interruptible(isert_np->np_accept_wq,
+			!isert_check_accept_queue(isert_np) ||
+			np->np_thread_state == ISCSI_NP_THREAD_RESET);
+	if (max_accept > 5)
+		return -ENODEV;
+
+	spin_lock_bh(&np->np_thread_lock);
+	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+		spin_unlock_bh(&np->np_thread_lock);
+		pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+		return -ENODEV;
+	}
+	spin_unlock_bh(&np->np_thread_lock);
+
+	mutex_lock(&isert_np->np_accept_mutex);
+	if (list_empty(&isert_np->np_accept_list)) {
+		mutex_unlock(&isert_np->np_accept_mutex);
+		max_accept++;
+		goto accept_wait;
+	}
+	isert_conn = list_first_entry(&isert_np->np_accept_list,
+			struct isert_conn, conn_accept_node);
+	list_del_init(&isert_conn->conn_accept_node);
+	mutex_unlock(&isert_np->np_accept_mutex);
+
+	conn->context = isert_conn;
+	isert_conn->conn = conn;
+	max_accept = 0;
+
+	ret = isert_rdma_post_recvl(isert_conn);
+	if (ret)
+		return ret;
+
+	ret = isert_rdma_accept(isert_conn);
+	if (ret)
+		return ret;
+
+	isert_set_conn_info(np, conn, isert_conn);
+
+	pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+	return 0;
+}
+
+static void
+isert_free_np(struct iscsi_np *np)
+{
+	struct isert_np *isert_np = (struct isert_np *)np->np_context;
+
+	rdma_destroy_id(isert_np->np_cm_id);
+
+	np->np_context = NULL;
+	kfree(isert_np);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+	struct isert_conn *isert_conn = conn->context;
+
+	pr_debug("isert_free_conn: Starting \n");
+	/*
+	 * Decrement post_send_buf_count for special case when called
+	 * from isert_do_control_comp() -> iscsit_logout_post_handler()
+	 */
+	if (isert_conn->logout_posted)
+		atomic_dec(&isert_conn->post_send_buf_count);
+
+	if (isert_conn->conn_cm_id)
+		rdma_disconnect(isert_conn->conn_cm_id);
+	/*
+	 * Only wait for conn_wait_comp_err if the isert_conn made it
+	 * into full feature phase.
+	 */
+	if (isert_conn->state > ISER_CONN_INIT) {
+		pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
+			 isert_conn->state);
+		wait_event(isert_conn->conn_wait_comp_err,
+			   isert_conn->state == ISER_CONN_TERMINATING);
+		pr_debug("isert_free_conn: After wait_event #1 >>>>>>>>>>>>\n");
+	}
+
+	pr_debug("isert_free_conn: wait_event conn_wait %d\n", isert_conn->state);
+	wait_event(isert_conn->conn_wait, isert_conn->state == ISER_CONN_DOWN);
+	pr_debug("isert_free_conn: After wait_event #2 >>>>>>>>>>>>>>>>>>>>\n");
+
+	isert_put_conn(isert_conn);
+}
+
+static struct iscsit_transport iser_target_transport = {
+	.name			= "IB/iSER",
+	.transport_type		= ISCSI_INFINIBAND,
+	.owner			= THIS_MODULE,
+	.iscsit_setup_np	= isert_setup_np,
+	.iscsit_accept_np	= isert_accept_np,
+	.iscsit_free_np		= isert_free_np,
+	.iscsit_free_conn	= isert_free_conn,
+	.iscsit_alloc_cmd	= isert_alloc_cmd,
+	.iscsit_get_login_rx	= isert_get_login_rx,
+	.iscsit_put_login_tx	= isert_put_login_tx,
+	.iscsit_immediate_queue	= isert_immediate_queue,
+	.iscsit_response_queue	= isert_response_queue,
+	.iscsit_get_dataout	= isert_get_dataout,
+	.iscsit_queue_data_in	= isert_put_datain,
+	.iscsit_queue_status	= isert_put_response,
+};
+
+static int __init isert_init(void)
+{
+	int ret;
+
+	isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
+	if (!isert_rx_wq) {
+		pr_err("Unable to allocate isert_rx_wq\n");
+		return -ENOMEM;
+	}
+
+	isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+	if (!isert_comp_wq) {
+		pr_err("Unable to allocate isert_comp_wq\n");
+		ret = -ENOMEM;
+		goto destroy_rx_wq;
+	}
+
+	isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
+			sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
+			0, NULL);
+	if (!isert_cmd_cache) {
+		pr_err("Unable to create isert_cmd_cache\n");
+		ret = -ENOMEM;
+		goto destroy_tx_cq;
+	}
+
+	iscsit_register_transport(&iser_target_transport);
+	pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+	return 0;
+
+destroy_tx_cq:
+	destroy_workqueue(isert_comp_wq);
+destroy_rx_wq:
+	destroy_workqueue(isert_rx_wq);
+	return ret;
+}
+
+static void __exit isert_exit(void)
+{
+	kmem_cache_destroy(isert_cmd_cache);
+	destroy_workqueue(isert_comp_wq);
+	destroy_workqueue(isert_rx_wq);
+	iscsit_unregister_transport(&iser_target_transport);
+	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
+}
+
+MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
+MODULE_VERSION("0.1");
+MODULE_AUTHOR("nab@Linux-iSCSI.org");
+MODULE_LICENSE("GPL");
+
+module_init(isert_init);
+module_exit(isert_exit);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
new file mode 100644
index 0000000..b104f4c
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -0,0 +1,138 @@
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+
+#define ISERT_RDMA_LISTEN_BACKLOG	10
+
+enum isert_desc_type {
+	ISCSI_TX_CONTROL,
+	ISCSI_TX_DATAIN
+};
+
+enum iser_ib_op_code {
+	ISER_IB_RECV,
+	ISER_IB_SEND,
+	ISER_IB_RDMA_WRITE,
+	ISER_IB_RDMA_READ,
+};
+
+enum iser_conn_state {
+	ISER_CONN_INIT,
+	ISER_CONN_UP,
+	ISER_CONN_TERMINATING,
+	ISER_CONN_DOWN,
+};
+
+struct iser_rx_desc {
+	struct iser_hdr iser_header;
+	struct iscsi_hdr iscsi_header;
+	char		data[ISER_RECV_DATA_SEG_LEN];
+	u64		dma_addr;
+	struct ib_sge	rx_sg;
+	char		pad[ISER_RX_PAD_SIZE];
+} __packed;
+
+struct iser_tx_desc {
+	struct iser_hdr iser_header;
+	struct iscsi_hdr iscsi_header;
+	enum isert_desc_type type;
+	u64		dma_addr;
+	struct ib_sge	tx_sg[2];
+	int		num_sge;
+	struct isert_cmd *isert_cmd;
+	struct ib_send_wr send_wr;
+} __packed;
+
+struct isert_rdma_wr {
+	struct list_head	wr_list;
+	struct isert_cmd	*isert_cmd;
+	enum iser_ib_op_code	iser_ib_op;
+	struct ib_sge		*ib_sge;
+	int			num_sge;
+	struct scatterlist	*sge;
+	int			send_wr_num;
+	struct ib_send_wr	*send_wr;
+};
+
+struct isert_cmd {
+	uint32_t		read_stag;
+	uint32_t		write_stag;
+	uint64_t		read_va;
+	uint64_t		write_va;
+	u64			sense_buf_dma;
+	u32			sense_buf_len;
+	u32			read_va_off;
+	u32			write_va_off;
+	u32			rdma_wr_num;
+	struct isert_conn	*conn;
+	struct iscsi_cmd	iscsi_cmd;
+	struct ib_sge		*ib_sge;
+	struct iser_tx_desc	tx_desc;
+	struct isert_rdma_wr	rdma_wr;
+	struct work_struct	comp_work;
+};
+
+struct isert_device;
+
+struct isert_conn {
+	enum iser_conn_state	state;
+	bool			logout_posted;
+	int			post_recv_buf_count;
+	atomic_t		post_send_buf_count;
+	u32			responder_resources;
+	u32			initiator_depth;
+	u32			max_sge;
+	char			*login_buf;
+	char			*login_req_buf;
+	char			*login_rsp_buf;
+	u64			login_req_dma;
+	u64			login_rsp_dma;
+	unsigned int		conn_rx_desc_head;
+	struct iser_rx_desc	*conn_rx_descs;
+	struct ib_recv_wr	conn_rx_wr[ISERT_MIN_POSTED_RX];
+	struct iscsi_conn	*conn;
+	struct list_head	conn_accept_node;
+	struct completion	conn_login_comp;
+	struct iser_tx_desc	conn_login_tx_desc;
+	struct rdma_cm_id	*conn_cm_id;
+	struct ib_pd		*conn_pd;
+	struct ib_mr		*conn_mr;
+	struct ib_qp		*conn_qp;
+	struct isert_device	*conn_device;
+	struct work_struct	conn_logout_work;
+	wait_queue_head_t	conn_wait;
+	wait_queue_head_t	conn_wait_comp_err;
+	struct kref		conn_kref;
+};
+
+#define ISERT_MAX_CQ 64
+
+struct isert_cq_desc {
+	struct isert_device	*device;
+	int			cq_index;
+	struct work_struct	cq_rx_work;
+	struct work_struct	cq_tx_work;
+};
+
+struct isert_device {
+	int			cqs_used;
+	int			refcount;
+	int			cq_active_qps[ISERT_MAX_CQ];
+	struct ib_device	*ib_device;
+	struct ib_pd		*dev_pd;
+	struct ib_mr		*dev_mr;
+	struct ib_cq		*dev_rx_cq[ISERT_MAX_CQ];
+	struct ib_cq		*dev_tx_cq[ISERT_MAX_CQ];
+	struct isert_cq_desc	*cq_desc;
+	struct list_head	dev_node;
+};
+
+struct isert_np {
+	wait_queue_head_t	np_accept_wq;
+	struct rdma_cm_id	*np_cm_id;
+	struct mutex		np_accept_mutex;
+	struct list_head	np_accept_list;
+	struct completion	np_login_comp;
+};
diff --git a/drivers/infiniband/ulp/isert/isert_proto.h b/drivers/infiniband/ulp/isert/isert_proto.h
new file mode 100644
index 0000000..4dccd313
--- /dev/null
+++ b/drivers/infiniband/ulp/isert/isert_proto.h
@@ -0,0 +1,47 @@
+/* From iscsi_iser.h */
+
+struct iser_hdr {
+	u8	flags;
+	u8	rsvd[3];
+	__be32	write_stag; /* write rkey */
+	__be64	write_va;
+	__be32	read_stag;  /* read rkey */
+	__be64	read_va;
+} __packed;
+
+/* Constant PDU length calculations */
+#define ISER_HEADERS_LEN  (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
+
+#define ISER_RECV_DATA_SEG_LEN  8192
+#define ISER_RX_PAYLOAD_SIZE    (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
+#define ISER_RX_LOGIN_SIZE      (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
+
+/* QP settings */
+/* Maximal bounds on received asynchronous PDUs */
+#define ISERT_MAX_TX_MISC_PDUS	4 /* NOOP_IN(2) , ASYNC_EVENT(2)   */
+
+#define ISERT_MAX_RX_MISC_PDUS	6 /* NOOP_OUT(2), TEXT(1),         *
+				   * SCSI_TMFUNC(2), LOGOUT(1) */
+
+#define ISCSI_DEF_XMIT_CMDS_MAX 128 /* from libiscsi.h, must be power of 2 */
+
+#define ISERT_QP_MAX_RECV_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISERT_MIN_POSTED_RX	(ISCSI_DEF_XMIT_CMDS_MAX >> 2)
+
+#define ISERT_INFLIGHT_DATAOUTS	8
+
+#define ISERT_QP_MAX_REQ_DTOS	(ISCSI_DEF_XMIT_CMDS_MAX *    \
+				(1 + ISERT_INFLIGHT_DATAOUTS) + \
+				ISERT_MAX_TX_MISC_PDUS	+ \
+				ISERT_MAX_RX_MISC_PDUS)
+
+#define ISER_RX_PAD_SIZE	(ISER_RECV_DATA_SEG_LEN + 4096 - \
+		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge)))
+
+#define ISER_VER	0x10
+#define ISER_WSV	0x08
+#define ISER_RSV	0x04
+#define ISCSI_CTRL	0x10
+#define ISER_HELLO	0x20
+#define ISER_HELLORPLY	0x30
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 4fd9d6a..5a2c754 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -846,7 +846,7 @@
 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
 		set_bit(bit, kaddr);
 	else
-		test_and_set_bit_le(bit, kaddr);
+		set_bit_le(bit, kaddr);
 	kunmap_atomic(kaddr);
 	pr_debug("set file bit %lu page %lu\n", bit, page->index);
 	/* record page number so it gets flushed to disk when unplug occurs */
@@ -868,7 +868,7 @@
 	if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
 		clear_bit(bit, paddr);
 	else
-		test_and_clear_bit_le(bit, paddr);
+		clear_bit_le(bit, paddr);
 	kunmap_atomic(paddr);
 	if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
 		set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 311e3d3..1d3fe1a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1279,6 +1279,31 @@
 	return DM_MAPIO_SUBMITTED;
 }
 
+static const char *decipher_sync_action(struct mddev *mddev)
+{
+	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+		return "frozen";
+
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
+		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+			return "reshape";
+
+		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+				return "resync";
+			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
+				return "check";
+			return "repair";
+		}
+
+		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+			return "recover";
+	}
+
+	return "idle";
+}
+
 static void raid_status(struct dm_target *ti, status_type_t type,
 			unsigned status_flags, char *result, unsigned maxlen)
 {
@@ -1298,8 +1323,18 @@
 			sync = rs->md.recovery_cp;
 
 		if (sync >= rs->md.resync_max_sectors) {
+			/*
+			 * Sync complete.
+			 */
 			array_in_sync = 1;
 			sync = rs->md.resync_max_sectors;
+		} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
+			/*
+			 * If "check" or "repair" is occurring, the array has
+			 * undergone an initial sync and the health characters
+			 * should not be 'a' anymore.
+			 */
+			array_in_sync = 1;
 		} else {
 			/*
 			 * The array may be doing an initial sync, or it may
@@ -1311,6 +1346,7 @@
 				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
 					array_in_sync = 1;
 		}
+
 		/*
 		 * Status characters:
 		 *  'D' = Dead/Failed device
@@ -1339,6 +1375,21 @@
 		       (unsigned long long) sync,
 		       (unsigned long long) rs->md.resync_max_sectors);
 
+		/*
+		 * Sync action:
+		 *   See Documentation/device-mapper/dm-raid.txt for
+		 *   information on each of these states.
+		 */
+		DMEMIT(" %s", decipher_sync_action(&rs->md));
+
+		/*
+		 * resync_mismatches/mismatch_cnt
+		 *   This field shows the number of discrepancies found when
+		 *   performing a "check" of the array.
+		 */
+		DMEMIT(" %llu",
+		       (unsigned long long)
+		       atomic64_read(&rs->md.resync_mismatches));
 		break;
 	case STATUSTYPE_TABLE:
 		/* The string you would use to construct this array */
@@ -1425,7 +1476,62 @@
 	}
 }
 
-static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
+static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+	struct raid_set *rs = ti->private;
+	struct mddev *mddev = &rs->md;
+
+	if (!strcasecmp(argv[0], "reshape")) {
+		DMERR("Reshape not supported.");
+		return -EINVAL;
+	}
+
+	if (!mddev->pers || !mddev->pers->sync_request)
+		return -EINVAL;
+
+	if (!strcasecmp(argv[0], "frozen"))
+		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+	else
+		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+
+	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
+		if (mddev->sync_thread) {
+			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+			md_reap_sync_thread(mddev);
+		}
+	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+		return -EBUSY;
+	else if (!strcasecmp(argv[0], "resync"))
+		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	else if (!strcasecmp(argv[0], "recover")) {
+		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	} else {
+		if (!strcasecmp(argv[0], "check"))
+			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+		else if (!!strcasecmp(argv[0], "repair"))
+			return -EINVAL;
+		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+	}
+	if (mddev->ro == 2) {
+		/* A write to sync_action is enough to justify
+		 * canceling read-auto mode
+		 */
+		mddev->ro = 0;
+		if (!mddev->suspended)
+			md_wakeup_thread(mddev->sync_thread);
+	}
+	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	if (!mddev->suspended)
+		md_wakeup_thread(mddev->thread);
+
+	return 0;
+}
+
+static int raid_iterate_devices(struct dm_target *ti,
+				iterate_devices_callout_fn fn, void *data)
 {
 	struct raid_set *rs = ti->private;
 	unsigned i;
@@ -1482,12 +1588,13 @@
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 4, 2},
+	.version = {1, 5, 0},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
 	.map = raid_map,
 	.status = raid_status,
+	.message = raid_message,
 	.iterate_devices = raid_iterate_devices,
 	.io_hints = raid_io_hints,
 	.presuspend = raid_presuspend,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aeceedf..4c74424 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -72,6 +72,9 @@
 static struct workqueue_struct *md_wq;
 static struct workqueue_struct *md_misc_wq;
 
+static int remove_and_add_spares(struct mddev *mddev,
+				 struct md_rdev *this);
+
 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }
 
 /*
@@ -1564,8 +1567,8 @@
 					     sector, count, 1) == 0)
 				return -EINVAL;
 		}
-	} else if (sb->bblog_offset == 0)
-		rdev->badblocks.shift = -1;
+	} else if (sb->bblog_offset != 0)
+		rdev->badblocks.shift = 0;
 
 	if (!refdev) {
 		ret = 1;
@@ -2411,6 +2414,11 @@
 	int nospares = 0;
 	int any_badblocks_changed = 0;
 
+	if (mddev->ro) {
+		if (force_change)
+			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		return;
+	}
 repeat:
 	/* First make sure individual recovery_offsets are correct */
 	rdev_for_each(rdev, mddev) {
@@ -2800,12 +2808,10 @@
 		/* personality does all needed checks */
 		if (rdev->mddev->pers->hot_remove_disk == NULL)
 			return -EINVAL;
-		err = rdev->mddev->pers->
-			hot_remove_disk(rdev->mddev, rdev);
-		if (err)
-			return err;
-		sysfs_unlink_rdev(rdev->mddev, rdev);
-		rdev->raid_disk = -1;
+		clear_bit(Blocked, &rdev->flags);
+		remove_and_add_spares(rdev->mddev, rdev);
+		if (rdev->raid_disk >= 0)
+			return -EBUSY;
 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
 		md_wakeup_thread(rdev->mddev->thread);
 	} else if (rdev->mddev->pers) {
@@ -3221,7 +3227,7 @@
 	 * be used - I wonder if that matters
 	 */
 	rdev->badblocks.count = 0;
-	rdev->badblocks.shift = 0;
+	rdev->badblocks.shift = -1; /* disabled until explicitly enabled */
 	rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	seqlock_init(&rdev->badblocks.lock);
 	if (rdev->badblocks.page == NULL)
@@ -3293,9 +3299,6 @@
 			goto abort_free;
 		}
 	}
-	if (super_format == -1)
-		/* hot-add for 0.90, or non-persistent: so no badblocks */
-		rdev->badblocks.shift = -1;
 
 	return rdev;
 
@@ -4225,8 +4228,6 @@
 	return sprintf(page, "%s\n", type);
 }
 
-static void reap_sync_thread(struct mddev *mddev);
-
 static ssize_t
 action_store(struct mddev *mddev, const char *page, size_t len)
 {
@@ -4241,7 +4242,7 @@
 	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
 		if (mddev->sync_thread) {
 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-			reap_sync_thread(mddev);
+			md_reap_sync_thread(mddev);
 		}
 	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
@@ -5279,7 +5280,7 @@
 	if (mddev->sync_thread) {
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-		reap_sync_thread(mddev);
+		md_reap_sync_thread(mddev);
 	}
 
 	del_timer_sync(&mddev->safemode_timer);
@@ -5287,7 +5288,8 @@
 	bitmap_flush(mddev);
 	md_super_wait(mddev);
 
-	if (!mddev->in_sync || mddev->flags) {
+	if (mddev->ro == 0 &&
+	    (!mddev->in_sync || mddev->flags)) {
 		/* mark array as shutdown cleanly */
 		mddev->in_sync = 1;
 		md_update_sb(mddev, 1);
@@ -5810,7 +5812,7 @@
 		else
 			sysfs_notify_dirent_safe(rdev->sysfs_state);
 
-		md_update_sb(mddev, 1);
+		set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		if (mddev->degraded)
 			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -5877,6 +5879,9 @@
 	if (!rdev)
 		return -ENXIO;
 
+	clear_bit(Blocked, &rdev->flags);
+	remove_and_add_spares(mddev, rdev);
+
 	if (rdev->raid_disk >= 0)
 		goto busy;
 
@@ -6490,6 +6495,28 @@
 		err = md_set_readonly(mddev, bdev);
 		goto done_unlock;
 
+	case HOT_REMOVE_DISK:
+		err = hot_remove_disk(mddev, new_decode_dev(arg));
+		goto done_unlock;
+
+	case ADD_NEW_DISK:
+		/* We can support ADD_NEW_DISK on read-only arrays
+		 * only if we are re-adding a preexisting device.
+		 * So require mddev->pers and MD_DISK_SYNC.
+		 */
+		if (mddev->pers) {
+			mdu_disk_info_t info;
+			if (copy_from_user(&info, argp, sizeof(info)))
+				err = -EFAULT;
+			else if (!(info.state & (1<<MD_DISK_SYNC)))
+				/* Need to clear read-only for this */
+				break;
+			else
+				err = add_new_disk(mddev, &info);
+			goto done_unlock;
+		}
+		break;
+
 	case BLKROSET:
 		if (get_user(ro, (int __user *)(arg))) {
 			err = -EFAULT;
@@ -6560,10 +6587,6 @@
 		goto done_unlock;
 	}
 
-	case HOT_REMOVE_DISK:
-		err = hot_remove_disk(mddev, new_decode_dev(arg));
-		goto done_unlock;
-
 	case HOT_ADD_DISK:
 		err = hot_add_disk(mddev, new_decode_dev(arg));
 		goto done_unlock;
@@ -7644,14 +7667,16 @@
 }
 EXPORT_SYMBOL_GPL(md_do_sync);
 
-static int remove_and_add_spares(struct mddev *mddev)
+static int remove_and_add_spares(struct mddev *mddev,
+				 struct md_rdev *this)
 {
 	struct md_rdev *rdev;
 	int spares = 0;
 	int removed = 0;
 
 	rdev_for_each(rdev, mddev)
-		if (rdev->raid_disk >= 0 &&
+		if ((this == NULL || rdev == this) &&
+		    rdev->raid_disk >= 0 &&
 		    !test_bit(Blocked, &rdev->flags) &&
 		    (test_bit(Faulty, &rdev->flags) ||
 		     ! test_bit(In_sync, &rdev->flags)) &&
@@ -7666,72 +7691,50 @@
 	if (removed && mddev->kobj.sd)
 		sysfs_notify(&mddev->kobj, NULL, "degraded");
 
+	if (this)
+		goto no_add;
+
 	rdev_for_each(rdev, mddev) {
 		if (rdev->raid_disk >= 0 &&
 		    !test_bit(In_sync, &rdev->flags) &&
 		    !test_bit(Faulty, &rdev->flags))
 			spares++;
-		if (rdev->raid_disk < 0
-		    && !test_bit(Faulty, &rdev->flags)) {
-			rdev->recovery_offset = 0;
-			if (mddev->pers->
-			    hot_add_disk(mddev, rdev) == 0) {
-				if (sysfs_link_rdev(mddev, rdev))
-					/* failure here is OK */;
-				spares++;
-				md_new_event(mddev);
-				set_bit(MD_CHANGE_DEVS, &mddev->flags);
-			}
+		if (rdev->raid_disk >= 0)
+			continue;
+		if (test_bit(Faulty, &rdev->flags))
+			continue;
+		if (mddev->ro &&
+		    rdev->saved_raid_disk < 0)
+			continue;
+
+		rdev->recovery_offset = 0;
+		if (rdev->saved_raid_disk >= 0 && mddev->in_sync) {
+			spin_lock_irq(&mddev->write_lock);
+			if (mddev->in_sync)
+				/* OK, this device, which is in_sync,
+				 * will definitely be noticed before
+				 * the next write, so recovery isn't
+				 * needed.
+				 */
+				rdev->recovery_offset = mddev->recovery_cp;
+			spin_unlock_irq(&mddev->write_lock);
 		}
-	}
-	if (removed)
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
-	return spares;
-}
-
-static void reap_sync_thread(struct mddev *mddev)
-{
-	struct md_rdev *rdev;
-
-	/* resync has finished, collect result */
-	md_unregister_thread(&mddev->sync_thread);
-	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
-	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
-		/* success...*/
-		/* activate any spares */
-		if (mddev->pers->spare_active(mddev)) {
-			sysfs_notify(&mddev->kobj, NULL,
-				     "degraded");
+		if (mddev->ro && rdev->recovery_offset != MaxSector)
+			/* not safe to add this disk now */
+			continue;
+		if (mddev->pers->
+		    hot_add_disk(mddev, rdev) == 0) {
+			if (sysfs_link_rdev(mddev, rdev))
+				/* failure here is OK */;
+			spares++;
+			md_new_event(mddev);
 			set_bit(MD_CHANGE_DEVS, &mddev->flags);
 		}
 	}
-	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    mddev->pers->finish_reshape)
-		mddev->pers->finish_reshape(mddev);
-
-	/* If array is no-longer degraded, then any saved_raid_disk
-	 * information must be scrapped.  Also if any device is now
-	 * In_sync we must scrape the saved_raid_disk for that device
-	 * do the superblock for an incrementally recovered device
-	 * written out.
-	 */
-	rdev_for_each(rdev, mddev)
-		if (!mddev->degraded ||
-		    test_bit(In_sync, &rdev->flags))
-			rdev->saved_raid_disk = -1;
-
-	md_update_sb(mddev, 1);
-	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
-	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
-	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
-	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
-	/* flag recovery needed just to double check */
-	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-	sysfs_notify_dirent_safe(mddev->sysfs_action);
-	md_new_event(mddev);
-	if (mddev->event_work.func)
-		queue_work(md_misc_wq, &mddev->event_work);
+no_add:
+	if (removed)
+		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	return spares;
 }
 
 /*
@@ -7789,22 +7792,16 @@
 		int spares = 0;
 
 		if (mddev->ro) {
-			/* Only thing we do on a ro array is remove
-			 * failed devices.
+			/* On a read-only array we can:
+			 * - remove failed devices
+			 * - add already-in_sync devices if the array itself
+			 *   is in-sync.
+			 * As we only add devices that are already in-sync,
+			 * we can activate the spares immediately.
 			 */
-			struct md_rdev *rdev;
-			rdev_for_each(rdev, mddev)
-				if (rdev->raid_disk >= 0 &&
-				    !test_bit(Blocked, &rdev->flags) &&
-				    test_bit(Faulty, &rdev->flags) &&
-				    atomic_read(&rdev->nr_pending)==0) {
-					if (mddev->pers->hot_remove_disk(
-						    mddev, rdev) == 0) {
-						sysfs_unlink_rdev(mddev, rdev);
-						rdev->raid_disk = -1;
-					}
-				}
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+			remove_and_add_spares(mddev, NULL);
+			mddev->pers->spare_active(mddev);
 			goto unlock;
 		}
 
@@ -7836,7 +7833,7 @@
 			goto unlock;
 		}
 		if (mddev->sync_thread) {
-			reap_sync_thread(mddev);
+			md_reap_sync_thread(mddev);
 			goto unlock;
 		}
 		/* Set RUNNING before clearing NEEDED to avoid
@@ -7867,7 +7864,7 @@
 				goto unlock;
 			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
 			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
-		} else if ((spares = remove_and_add_spares(mddev))) {
+		} else if ((spares = remove_and_add_spares(mddev, NULL))) {
 			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
 			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
 			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
@@ -7917,6 +7914,51 @@
 	}
 }
 
+void md_reap_sync_thread(struct mddev *mddev)
+{
+	struct md_rdev *rdev;
+
+	/* resync has finished, collect result */
+	md_unregister_thread(&mddev->sync_thread);
+	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
+	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
+		/* success...*/
+		/* activate any spares */
+		if (mddev->pers->spare_active(mddev)) {
+			sysfs_notify(&mddev->kobj, NULL,
+				     "degraded");
+			set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		}
+	}
+	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+	    mddev->pers->finish_reshape)
+		mddev->pers->finish_reshape(mddev);
+
+	/* If the array is no longer degraded, then any saved_raid_disk
+	 * information must be scrapped.  Also, if any device is now
+	 * In_sync, we must scrape the saved_raid_disk for that device
+	 * so the superblock for an incrementally recovered device gets
+	 * written out.
+	 */
+	rdev_for_each(rdev, mddev)
+		if (!mddev->degraded ||
+		    test_bit(In_sync, &rdev->flags))
+			rdev->saved_raid_disk = -1;
+
+	md_update_sb(mddev, 1);
+	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+	/* flag recovery needed just to double check */
+	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+	sysfs_notify_dirent_safe(mddev->sysfs_action);
+	md_new_event(mddev);
+	if (mddev->event_work.func)
+		queue_work(md_misc_wq, &mddev->event_work);
+}
+
 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
 	sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -8642,6 +8684,7 @@
 EXPORT_SYMBOL(md_unregister_thread);
 EXPORT_SYMBOL(md_wakeup_thread);
 EXPORT_SYMBOL(md_check_recovery);
+EXPORT_SYMBOL(md_reap_sync_thread);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD RAID framework");
 MODULE_ALIAS("md");
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d90fb1a..653f992b6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -567,6 +567,7 @@
 extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
+extern void md_reap_sync_thread(struct mddev *mddev);
 extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd86b37..851023e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -981,7 +981,12 @@
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
 		bio->bi_next = NULL;
-		generic_make_request(bio);
+		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			/* Just ignore it */
+			bio_endio(bio, 0);
+		else
+			generic_make_request(bio);
 		bio = next;
 	}
 	kfree(plug);
@@ -2901,6 +2906,7 @@
 	if (conf->r1bio_pool)
 		mempool_destroy(conf->r1bio_pool);
 	kfree(conf->mirrors);
+	safe_put_page(conf->tmppage);
 	kfree(conf->poolinfo);
 	kfree(conf);
 	mddev->private = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 77b562d..018741b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1133,7 +1133,12 @@
 	while (bio) { /* submit pending writes */
 		struct bio *next = bio->bi_next;
 		bio->bi_next = NULL;
-		generic_make_request(bio);
+		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
+		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
+			/* Just ignore it */
+			bio_endio(bio, 0);
+		else
+			generic_make_request(bio);
 		bio = next;
 	}
 	kfree(plug);
@@ -2913,6 +2918,22 @@
 		if (init_resync(conf))
 			return 0;
 
+	/*
+	 * Allow skipping a full rebuild for incremental assembly
+	 * of a clean array, like RAID1 does.
+	 */
+	if (mddev->bitmap == NULL &&
+	    mddev->recovery_cp == MaxSector &&
+	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+	    conf->fullsync == 0) {
+		*skipped = 1;
+		max_sector = mddev->dev_sectors;
+		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+		    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+			max_sector = mddev->resync_max_sectors;
+		return max_sector - sector_nr;
+	}
+
  skipped:
 	max_sector = mddev->dev_sectors;
 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
@@ -3810,6 +3831,7 @@
 
 	if (conf->r10bio_pool)
 		mempool_destroy(conf->r10bio_pool);
+	safe_put_page(conf->tmppage);
 	kfree(conf->mirrors);
 	kfree(conf);
 	mddev->private = NULL;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f4e87bf..4a7be45 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1887,8 +1887,15 @@
 					&rdev->mddev->recovery);
 		} else if (is_badblock(rdev, sh->sector,
 				       STRIPE_SECTORS,
-				       &first_bad, &bad_sectors))
+				       &first_bad, &bad_sectors)) {
 			set_bit(R5_MadeGood, &sh->dev[i].flags);
+			if (test_bit(R5_ReadError, &sh->dev[i].flags))
+				/* That was a successful write so make
+				 * sure it looks like we already did
+				 * a re-write.
+				 */
+				set_bit(R5_ReWrite, &sh->dev[i].flags);
+		}
 	}
 	rdev_dec_pending(rdev, conf->mddev);
 
@@ -4672,9 +4679,10 @@
 		*skipped = 1;
 		return rv;
 	}
-	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
-	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
-	    !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
+	if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+	    !conf->fullsync &&
+	    !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
+	    sync_blocks >= STRIPE_SECTORS) {
 		/* we can skip this block, and probably more */
 		sync_blocks /= STRIPE_SECTORS;
 		*skipped = 1;
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 7c84ced..f276352 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -95,6 +95,7 @@
 #define AB8500_IT_MASK22_REG		0x55
 #define AB8500_IT_MASK23_REG		0x56
 #define AB8500_IT_MASK24_REG		0x57
+#define AB8500_IT_MASK25_REG		0x58
 
 /*
  * latch hierarchy registers
@@ -102,15 +103,25 @@
 #define AB8500_IT_LATCHHIER1_REG	0x60
 #define AB8500_IT_LATCHHIER2_REG	0x61
 #define AB8500_IT_LATCHHIER3_REG	0x62
+#define AB8540_IT_LATCHHIER4_REG	0x63
 
 #define AB8500_IT_LATCHHIER_NUM		3
+#define AB8540_IT_LATCHHIER_NUM		4
 
 #define AB8500_REV_REG			0x80
 #define AB8500_IC_NAME_REG		0x82
 #define AB8500_SWITCH_OFF_STATUS	0x00
 
 #define AB8500_TURN_ON_STATUS		0x00
+#define AB8505_TURN_ON_STATUS_2	0x04
 
+#define AB8500_CH_USBCH_STAT1_REG	0x02
+#define VBUS_DET_DBNC100		0x02
+#define VBUS_DET_DBNC1			0x01
+
+static DEFINE_SPINLOCK(on_stat_lock);
+static u8 turn_on_stat_mask = 0xFF;
+static u8 turn_on_stat_set;
 static bool no_bm; /* No battery management */
 module_param(no_bm, bool, S_IRUGO);
 
@@ -130,9 +141,15 @@
 	0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21,
 };
 
-/* AB9540 support */
+/* AB9540 / AB8505 support */
 static const int ab9540_irq_regoffset[AB9540_NUM_IRQ_REGS] = {
-	0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21, 12, 13, 24,
+	0, 1, 2, 3, 4, 6, 7, 8, 9, 11, 18, 19, 20, 21, 12, 13, 24, 5, 22, 23
+};
+
+/* AB8540 support */
+static const int ab8540_irq_regoffset[AB8540_NUM_IRQ_REGS] = {
+	0, 1, 2, 3, 4, -1, -1, -1, -1, 11, 18, 19, 20, 21, 12, 13, 24, 5, 22, 23,
+	25, 26, 27, 28, 29, 30, 31,
 };
 
 static const char ab8500_version_str[][7] = {
@@ -352,6 +369,9 @@
 			is_ab8500_1p1_or_earlier(ab8500))
 			continue;
 
+		if (ab8500->irq_reg_offset[i] < 0)
+			continue;
+
 		ab8500->oldmask[i] = new;
 
 		reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i];
@@ -423,6 +443,18 @@
 	.irq_set_type		= ab8500_irq_set_type,
 };
 
+static void update_latch_offset(u8 *offset, int i)
+{
+	/* Fix inconsistent ITFromLatch25 bit mapping... */
+	if (unlikely(*offset == 17))
+		*offset = 24;
+	/* Fix inconsistent ab8540 bit mapping... */
+	if (unlikely(*offset == 16))
+		*offset = 25;
+	if ((i == 3) && (*offset >= 24))
+		*offset += 2;
+}
+
 static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
 					int latch_offset, u8 latch_val)
 {
@@ -474,9 +506,7 @@
 		latch_bit = __ffs(hier_val);
 		latch_offset = (hier_offset << 3) + latch_bit;
 
-		/* Fix inconsistent ITFromLatch25 bit mapping... */
-		if (unlikely(latch_offset == 17))
-			latch_offset = 24;
+		update_latch_offset(&latch_offset, hier_offset);
 
 		status = get_register_interruptible(ab8500,
 				AB8500_INTERRUPT,
@@ -504,7 +534,7 @@
 	dev_vdbg(ab8500->dev, "interrupt\n");
 
 	/*  Hierarchical interrupt version */
-	for (i = 0; i < AB8500_IT_LATCHHIER_NUM; i++) {
+	for (i = 0; i < (ab8500->it_latchhier_num); i++) {
 		int status;
 		u8 hier_val;
 
@@ -520,63 +550,6 @@
 	return IRQ_HANDLED;
 }
 
-/**
- * ab8500_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
- *
- * @ab8500: ab8500_irq controller to operate on.
- * @irq: index of the interrupt requested in the chip IRQs
- *
- * Useful for drivers to request their own IRQs.
- */
-static int ab8500_irq_get_virq(struct ab8500 *ab8500, int irq)
-{
-	if (!ab8500)
-		return -EINVAL;
-
-	return irq_create_mapping(ab8500->domain, irq);
-}
-
-static irqreturn_t ab8500_irq(int irq, void *dev)
-{
-	struct ab8500 *ab8500 = dev;
-	int i;
-
-	dev_vdbg(ab8500->dev, "interrupt\n");
-
-	atomic_inc(&ab8500->transfer_ongoing);
-
-	for (i = 0; i < ab8500->mask_size; i++) {
-		int regoffset = ab8500->irq_reg_offset[i];
-		int status;
-		u8 value;
-
-		/*
-		 * Interrupt register 12 doesn't exist prior to AB8500 version
-		 * 2.0
-		 */
-		if (regoffset == 11 && is_ab8500_1p1_or_earlier(ab8500))
-			continue;
-
-		status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
-			AB8500_IT_LATCH1_REG + regoffset, &value);
-		if (status < 0 || value == 0)
-			continue;
-
-		do {
-			int bit = __ffs(value);
-			int line = i * 8 + bit;
-			int virq = ab8500_irq_get_virq(ab8500, line);
-
-			handle_nested_irq(virq);
-			ab8500_debug_register_interrupt(line);
-			value &= ~(1 << bit);
-
-		} while (value);
-	}
-	atomic_dec(&ab8500->transfer_ongoing);
-	return IRQ_HANDLED;
-}
-
 static int ab8500_irq_map(struct irq_domain *d, unsigned int virq,
 				irq_hw_number_t hwirq)
 {
@@ -607,7 +580,9 @@
 {
 	int num_irqs;
 
-	if (is_ab9540(ab8500))
+	if (is_ab8540(ab8500))
+		num_irqs = AB8540_NR_IRQS;
+	else if (is_ab9540(ab8500))
 		num_irqs = AB9540_NR_IRQS;
 	else if (is_ab8505(ab8500))
 		num_irqs = AB8505_NR_IRQS;
@@ -650,6 +625,15 @@
 	},
 };
 
+static struct resource ab8505_gpadc_resources[] = {
+	{
+		.name	= "SW_CONV_END",
+		.start	= AB8500_INT_GP_SW_ADC_CONV_END,
+		.end	= AB8500_INT_GP_SW_ADC_CONV_END,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
 static struct resource ab8500_rtc_resources[] = {
 	{
 		.name	= "60S",
@@ -973,6 +957,30 @@
 		.end   = AB8505_INT_KEYSTUCK,
 		.flags = IORESOURCE_IRQ,
 	},
+	{
+		.name = "VBUS_DET_R",
+		.start = AB8500_INT_VBUS_DET_R,
+		.end = AB8500_INT_VBUS_DET_R,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "VBUS_DET_F",
+		.start = AB8500_INT_VBUS_DET_F,
+		.end = AB8500_INT_VBUS_DET_F,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "ID_DET_PLUGR",
+		.start = AB8500_INT_ID_DET_PLUGR,
+		.end = AB8500_INT_ID_DET_PLUGR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.name = "ID_DET_PLUGF",
+		.start = AB8500_INT_ID_DET_PLUGF,
+		.end = AB8500_INT_ID_DET_PLUGF,
+		.flags = IORESOURCE_IRQ,
+	},
 };
 
 static struct resource ab8500_temp_resources[] = {
@@ -984,82 +992,6 @@
 	},
 };
 
-static struct mfd_cell abx500_common_devs[] = {
-#ifdef CONFIG_DEBUG_FS
-	{
-		.name = "ab8500-debug",
-		.of_compatible = "stericsson,ab8500-debug",
-		.num_resources = ARRAY_SIZE(ab8500_debug_resources),
-		.resources = ab8500_debug_resources,
-	},
-#endif
-	{
-		.name = "ab8500-sysctrl",
-		.of_compatible = "stericsson,ab8500-sysctrl",
-	},
-	{
-		.name = "ab8500-regulator",
-		.of_compatible = "stericsson,ab8500-regulator",
-	},
-	{
-		.name = "abx500-clk",
-		.of_compatible = "stericsson,abx500-clk",
-	},
-	{
-		.name = "ab8500-gpadc",
-		.of_compatible = "stericsson,ab8500-gpadc",
-		.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
-		.resources = ab8500_gpadc_resources,
-	},
-	{
-		.name = "ab8500-rtc",
-		.of_compatible = "stericsson,ab8500-rtc",
-		.num_resources = ARRAY_SIZE(ab8500_rtc_resources),
-		.resources = ab8500_rtc_resources,
-	},
-	{
-		.name = "ab8500-acc-det",
-		.of_compatible = "stericsson,ab8500-acc-det",
-		.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
-		.resources = ab8500_av_acc_detect_resources,
-	},
-	{
-		.name = "ab8500-poweron-key",
-		.of_compatible = "stericsson,ab8500-poweron-key",
-		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
-		.resources = ab8500_poweronkey_db_resources,
-	},
-	{
-		.name = "ab8500-pwm",
-		.of_compatible = "stericsson,ab8500-pwm",
-		.id = 1,
-	},
-	{
-		.name = "ab8500-pwm",
-		.of_compatible = "stericsson,ab8500-pwm",
-		.id = 2,
-	},
-	{
-		.name = "ab8500-pwm",
-		.of_compatible = "stericsson,ab8500-pwm",
-		.id = 3,
-	},
-	{
-		.name = "ab8500-leds",
-		.of_compatible = "stericsson,ab8500-leds",
-	},
-	{
-		.name = "ab8500-denc",
-		.of_compatible = "stericsson,ab8500-denc",
-	},
-	{
-		.name = "abx500-temp",
-		.of_compatible = "stericsson,abx500-temp",
-		.num_resources = ARRAY_SIZE(ab8500_temp_resources),
-		.resources = ab8500_temp_resources,
-	},
-};
-
 static struct mfd_cell ab8500_bm_devs[] = {
 	{
 		.name = "ab8500-charger",
@@ -1096,23 +1028,144 @@
 };
 
 static struct mfd_cell ab8500_devs[] = {
+#ifdef CONFIG_DEBUG_FS
 	{
-		.name = "pinctrl-ab8500",
+		.name = "ab8500-debug",
+		.of_compatible = "stericsson,ab8500-debug",
+		.num_resources = ARRAY_SIZE(ab8500_debug_resources),
+		.resources = ab8500_debug_resources,
+	},
+#endif
+	{
+		.name = "ab8500-sysctrl",
+		.of_compatible = "stericsson,ab8500-sysctrl",
+	},
+	{
+		.name = "ab8500-regulator",
+		.of_compatible = "stericsson,ab8500-regulator",
+	},
+	{
+		.name = "abx500-clk",
+		.of_compatible = "stericsson,abx500-clk",
+	},
+	{
+		.name = "ab8500-gpadc",
+		.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
+		.resources = ab8500_gpadc_resources,
+	},
+	{
+		.name = "ab8500-rtc",
+		.of_compatible = "stericsson,ab8500-rtc",
+		.num_resources = ARRAY_SIZE(ab8500_rtc_resources),
+		.resources = ab8500_rtc_resources,
+	},
+	{
+		.name = "ab8500-acc-det",
+		.of_compatible = "stericsson,ab8500-acc-det",
+		.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
+		.resources = ab8500_av_acc_detect_resources,
+	},
+	{
+		.name = "ab8500-poweron-key",
+		.of_compatible = "stericsson,ab8500-poweron-key",
+		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+		.resources = ab8500_poweronkey_db_resources,
+	},
+	{
+		.name = "ab8500-pwm",
+		.of_compatible = "stericsson,ab8500-pwm",
+		.id = 1,
+	},
+	{
+		.name = "ab8500-pwm",
+		.of_compatible = "stericsson,ab8500-pwm",
+		.id = 2,
+	},
+	{
+		.name = "ab8500-pwm",
+		.of_compatible = "stericsson,ab8500-pwm",
+		.id = 3,
+	},
+	{
+		.name = "ab8500-leds",
+		.of_compatible = "stericsson,ab8500-leds",
+	},
+	{
+		.name = "ab8500-denc",
+		.of_compatible = "stericsson,ab8500-denc",
+	},
+	{
+		.name = "ab8500-gpio",
 		.of_compatible = "stericsson,ab8500-gpio",
 	},
 	{
+		.name = "abx500-temp",
+		.of_compatible = "stericsson,abx500-temp",
+		.num_resources = ARRAY_SIZE(ab8500_temp_resources),
+		.resources = ab8500_temp_resources,
+	},
+	{
 		.name = "ab8500-usb",
-		.of_compatible = "stericsson,ab8500-usb",
 		.num_resources = ARRAY_SIZE(ab8500_usb_resources),
 		.resources = ab8500_usb_resources,
 	},
 	{
 		.name = "ab8500-codec",
-		.of_compatible = "stericsson,ab8500-codec",
 	},
 };
 
 static struct mfd_cell ab9540_devs[] = {
+#ifdef CONFIG_DEBUG_FS
+	{
+		.name = "ab8500-debug",
+		.num_resources = ARRAY_SIZE(ab8500_debug_resources),
+		.resources = ab8500_debug_resources,
+	},
+#endif
+	{
+		.name = "ab8500-sysctrl",
+	},
+	{
+		.name = "ab8500-regulator",
+	},
+	{
+		.name = "abx500-clk",
+		.of_compatible = "stericsson,abx500-clk",
+	},
+	{
+		.name = "ab8500-gpadc",
+		.of_compatible = "stericsson,ab8500-gpadc",
+		.num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
+		.resources = ab8500_gpadc_resources,
+	},
+	{
+		.name = "ab8500-rtc",
+		.num_resources = ARRAY_SIZE(ab8500_rtc_resources),
+		.resources = ab8500_rtc_resources,
+	},
+	{
+		.name = "ab8500-acc-det",
+		.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
+		.resources = ab8500_av_acc_detect_resources,
+	},
+	{
+		.name = "ab8500-poweron-key",
+		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+		.resources = ab8500_poweronkey_db_resources,
+	},
+	{
+		.name = "ab8500-pwm",
+		.id = 1,
+	},
+	{
+		.name = "ab8500-leds",
+	},
+	{
+		.name = "abx500-temp",
+		.num_resources = ARRAY_SIZE(ab8500_temp_resources),
+		.resources = ab8500_temp_resources,
+	},
 	{
 		.name = "pinctrl-ab9540",
 		.of_compatible = "stericsson,ab9540-gpio",
@@ -1125,10 +1178,138 @@
 	{
 		.name = "ab9540-codec",
 	},
+	{
+		.name = "ab-iddet",
+		.num_resources = ARRAY_SIZE(ab8505_iddet_resources),
+		.resources = ab8505_iddet_resources,
+	},
 };
 
-/* Device list common to ab9540 and ab8505 */
-static struct mfd_cell ab9540_ab8505_devs[] = {
+/* Device list for ab8505 */
+static struct mfd_cell ab8505_devs[] = {
+#ifdef CONFIG_DEBUG_FS
+	{
+		.name = "ab8500-debug",
+		.num_resources = ARRAY_SIZE(ab8500_debug_resources),
+		.resources = ab8500_debug_resources,
+	},
+#endif
+	{
+		.name = "ab8500-sysctrl",
+	},
+	{
+		.name = "ab8500-regulator",
+	},
+	{
+		.name = "abx500-clk",
+		.of_compatible = "stericsson,abx500-clk",
+	},
+	{
+		.name = "ab8500-gpadc",
+		.num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
+		.resources = ab8505_gpadc_resources,
+	},
+	{
+		.name = "ab8500-rtc",
+		.num_resources = ARRAY_SIZE(ab8500_rtc_resources),
+		.resources = ab8500_rtc_resources,
+	},
+	{
+		.name = "ab8500-acc-det",
+		.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
+		.resources = ab8500_av_acc_detect_resources,
+	},
+	{
+		.name = "ab8500-poweron-key",
+		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+		.resources = ab8500_poweronkey_db_resources,
+	},
+	{
+		.name = "ab8500-pwm",
+		.id = 1,
+	},
+	{
+		.name = "ab8500-leds",
+	},
+	{
+		.name = "ab8500-gpio",
+	},
+	{
+		.name = "ab8500-usb",
+		.num_resources = ARRAY_SIZE(ab8500_usb_resources),
+		.resources = ab8500_usb_resources,
+	},
+	{
+		.name = "ab8500-codec",
+	},
+	{
+		.name = "ab-iddet",
+		.num_resources = ARRAY_SIZE(ab8505_iddet_resources),
+		.resources = ab8505_iddet_resources,
+	},
+};
+
+static struct mfd_cell ab8540_devs[] = {
+#ifdef CONFIG_DEBUG_FS
+	{
+		.name = "ab8500-debug",
+		.num_resources = ARRAY_SIZE(ab8500_debug_resources),
+		.resources = ab8500_debug_resources,
+	},
+#endif
+	{
+		.name = "ab8500-sysctrl",
+	},
+	{
+		.name = "ab8500-regulator",
+	},
+	{
+		.name = "abx500-clk",
+		.of_compatible = "stericsson,abx500-clk",
+	},
+	{
+		.name = "ab8500-gpadc",
+		.num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
+		.resources = ab8505_gpadc_resources,
+	},
+	{
+		.name = "ab8500-rtc",
+		.num_resources = ARRAY_SIZE(ab8500_rtc_resources),
+		.resources = ab8500_rtc_resources,
+	},
+	{
+		.name = "ab8500-acc-det",
+		.num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
+		.resources = ab8500_av_acc_detect_resources,
+	},
+	{
+		.name = "ab8500-poweron-key",
+		.num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
+		.resources = ab8500_poweronkey_db_resources,
+	},
+	{
+		.name = "ab8500-pwm",
+		.id = 1,
+	},
+	{
+		.name = "ab8500-leds",
+	},
+	{
+		.name = "abx500-temp",
+		.num_resources = ARRAY_SIZE(ab8500_temp_resources),
+		.resources = ab8500_temp_resources,
+	},
+	{
+		.name = "ab8500-gpio",
+	},
+	{
+		.name = "ab8540-usb",
+		.num_resources = ARRAY_SIZE(ab8500_usb_resources),
+		.resources = ab8500_usb_resources,
+	},
+	{
+		.name = "ab8540-codec",
+	},
 	{
 		.name = "ab-iddet",
 		.num_resources = ARRAY_SIZE(ab8505_iddet_resources),
@@ -1142,6 +1323,7 @@
 	struct ab8500 *ab8500;
 
 	ab8500 = dev_get_drvdata(dev);
+
 	return sprintf(buf, "%#x\n", ab8500 ? ab8500->chip_id : -EINVAL);
 }
 
@@ -1171,6 +1353,15 @@
 	return sprintf(buf, "%#x\n", value);
 }
 
+/* Use mask and set to override the turn_on_stat register value */
+void ab8500_override_turn_on_stat(u8 mask, u8 set)
+{
+	spin_lock(&on_stat_lock);
+	turn_on_stat_mask = mask;
+	turn_on_stat_set = set;
+	spin_unlock(&on_stat_lock);
+}
+
 /*
  * ab8500 has turned on due to (TURN_ON_STATUS):
  * 0x01 PORnVbat
@@ -1194,9 +1385,38 @@
 		AB8500_TURN_ON_STATUS, &value);
 	if (ret < 0)
 		return ret;
+
+	/*
+	 * In L9540, turn_on_status register is not updated correctly if
+	 * the device is rebooted with AC/USB charger connected. Due to
+	 * this, the device boots android instead of entering into charge
+	 * only mode. Read the AC/USB status register to detect the charger
+	 * presence and update the turn on status manually.
+	 */
+	if (is_ab9540(ab8500)) {
+		spin_lock(&on_stat_lock);
+		value = (value & turn_on_stat_mask) | turn_on_stat_set;
+		spin_unlock(&on_stat_lock);
+	}
+
 	return sprintf(buf, "%#x\n", value);
 }
 
+static ssize_t show_turn_on_status_2(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	u8 value;
+	struct ab8500 *ab8500;
+
+	ab8500 = dev_get_drvdata(dev);
+	ret = get_register_interruptible(ab8500, AB8500_SYS_CTRL1_BLOCK,
+		AB8505_TURN_ON_STATUS_2, &value);
+	if (ret < 0)
+		return ret;
+	return sprintf(buf, "%#x\n", (value & 0x1));
+}
+
 static ssize_t show_ab9540_dbbrstn(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
@@ -1253,6 +1473,7 @@
 static DEVICE_ATTR(chip_id, S_IRUGO, show_chip_id, NULL);
 static DEVICE_ATTR(switch_off_status, S_IRUGO, show_switch_off_status, NULL);
 static DEVICE_ATTR(turn_on_status, S_IRUGO, show_turn_on_status, NULL);
+static DEVICE_ATTR(turn_on_status_2, S_IRUGO, show_turn_on_status_2, NULL);
 static DEVICE_ATTR(dbbrstn, S_IRUGO | S_IWUSR,
 			show_ab9540_dbbrstn, store_ab9540_dbbrstn);
 
@@ -1263,6 +1484,11 @@
 	NULL,
 };
 
+static struct attribute *ab8505_sysfs_entries[] = {
+	&dev_attr_turn_on_status_2.attr,
+	NULL,
+};
+
 static struct attribute *ab9540_sysfs_entries[] = {
 	&dev_attr_chip_id.attr,
 	&dev_attr_switch_off_status.attr,
@@ -1275,6 +1501,10 @@
 	.attrs	= ab8500_sysfs_entries,
 };
 
+static struct attribute_group ab8505_attr_group = {
+	.attrs	= ab8505_sysfs_entries,
+};
+
 static struct attribute_group ab9540_attr_group = {
 	.attrs	= ab9540_sysfs_entries,
 };
@@ -1290,6 +1520,15 @@
 		"Battery level lower than power on reset threshold",
 		"Power on key 1 pressed longer than 10 seconds",
 		"DB8500 thermal shutdown"};
+	static char *turn_on_status[] = {
+		"Battery rising (Vbat)",
+		"Power On Key 1 dbF",
+		"Power On Key 2 dbF",
+		"RTC Alarm",
+		"Main Charger Detect",
+		"Vbus Detect (USB)",
+		"USB ID Detect",
+		"UART Factory Mode Detect"};
 	struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
 	const struct platform_device_id *platid = platform_get_device_id(pdev);
 	enum ab8500_version version = AB8500_VERSION_UNDEFINED;
@@ -1351,13 +1590,20 @@
 			ab8500->chip_id >> 4,
 			ab8500->chip_id & 0x0F);
 
-	/* Configure AB8500 or AB9540 IRQ */
-	if (is_ab9540(ab8500) || is_ab8505(ab8500)) {
+	/* Configure AB8540 */
+	if (is_ab8540(ab8500)) {
+		ab8500->mask_size = AB8540_NUM_IRQ_REGS;
+		ab8500->irq_reg_offset = ab8540_irq_regoffset;
+		ab8500->it_latchhier_num = AB8540_IT_LATCHHIER_NUM;
+	} else if (is_ab9540(ab8500) || is_ab8505(ab8500)) {
+		/* Configure AB8500 or AB9540 IRQ */
 		ab8500->mask_size = AB9540_NUM_IRQ_REGS;
 		ab8500->irq_reg_offset = ab9540_irq_regoffset;
+		ab8500->it_latchhier_num = AB8500_IT_LATCHHIER_NUM;
 	} else {
 		ab8500->mask_size = AB8500_NUM_IRQ_REGS;
 		ab8500->irq_reg_offset = ab8500_irq_regoffset;
+		ab8500->it_latchhier_num = AB8500_IT_LATCHHIER_NUM;
 	}
 	ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
 	if (!ab8500->mask)
@@ -1396,10 +1642,36 @@
 	} else {
 		printk(KERN_CONT " None\n");
 	}
+	ret = get_register_interruptible(ab8500, AB8500_SYS_CTRL1_BLOCK,
+		AB8500_TURN_ON_STATUS, &value);
+	if (ret < 0)
+		return ret;
+	dev_info(ab8500->dev, "turn on reason(s) (%#x): ", value);
+
+	if (value) {
+		for (i = 0; i < ARRAY_SIZE(turn_on_status); i++) {
+			if (value & 1)
+				printk("\"%s\" ", turn_on_status[i]);
+			value = value >> 1;
+		}
+		printk("\n");
+	} else {
+		printk("None\n");
+	}
 
 	if (plat && plat->init)
 		plat->init(ab8500);
 
+	if (is_ab9540(ab8500)) {
+		ret = get_register_interruptible(ab8500, AB8500_CHARGER,
+			AB8500_CH_USBCH_STAT1_REG, &value);
+		if (ret < 0)
+			return ret;
+		if ((value & VBUS_DET_DBNC1) && (value & VBUS_DET_DBNC100))
+			ab8500_override_turn_on_stat(~AB8500_POW_KEY_1_ON,
+						     AB8500_VBUS_DET);
+	}
+
 	/* Clear and mask all interrupts */
 	for (i = 0; i < ab8500->mask_size; i++) {
 		/*
@@ -1410,6 +1682,9 @@
 				is_ab8500_1p1_or_earlier(ab8500))
 			continue;
 
+		if (ab8500->irq_reg_offset[i] < 0)
+			continue;
+
 		get_register_interruptible(ab8500, AB8500_INTERRUPT,
 			AB8500_IT_LATCH1_REG + ab8500->irq_reg_offset[i],
 			&value);
@@ -1428,26 +1703,10 @@
 	if (ret)
 		return ret;
 
-	/*  Activate this feature only in ab9540 */
-	/*  till tests are done on ab8500 1p2 or later*/
-	if (is_ab9540(ab8500)) {
-		ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
-						ab8500_hierarchical_irq,
-						IRQF_ONESHOT | IRQF_NO_SUSPEND,
-						"ab8500", ab8500);
-	}
-	else {
-		ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
-						ab8500_irq,
-						IRQF_ONESHOT | IRQF_NO_SUSPEND,
-						"ab8500", ab8500);
-		if (ret)
-			return ret;
-	}
-
-	ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
-			ARRAY_SIZE(abx500_common_devs), NULL,
-			ab8500->irq_base, ab8500->domain);
+	ret = devm_request_threaded_irq(&pdev->dev, ab8500->irq, NULL,
+			ab8500_hierarchical_irq,
+			IRQF_ONESHOT | IRQF_NO_SUSPEND,
+			"ab8500", ab8500);
 	if (ret)
 		return ret;
 
@@ -1455,6 +1714,14 @@
 		ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
 				ARRAY_SIZE(ab9540_devs), NULL,
 				ab8500->irq_base, ab8500->domain);
+	else if (is_ab8540(ab8500))
+		ret = mfd_add_devices(ab8500->dev, 0, ab8540_devs,
+			      ARRAY_SIZE(ab8540_devs), NULL,
+			      ab8500->irq_base, ab8500->domain);
+	else if (is_ab8505(ab8500))
+		ret = mfd_add_devices(ab8500->dev, 0, ab8505_devs,
+			      ARRAY_SIZE(ab8505_devs), NULL,
+			      ab8500->irq_base, ab8500->domain);
 	else
 		ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
 				ARRAY_SIZE(ab8500_devs), NULL,
@@ -1462,13 +1729,6 @@
 	if (ret)
 		return ret;
 
-	if (is_ab9540(ab8500) || is_ab8505(ab8500))
-		ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
-				ARRAY_SIZE(ab9540_ab8505_devs), NULL,
-				ab8500->irq_base, ab8500->domain);
-	if (ret)
-		return ret;
-
 	if (!no_bm) {
 		/* Add battery management devices */
 		ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
@@ -1478,12 +1738,19 @@
 			dev_err(ab8500->dev, "error adding bm devices\n");
 	}
 
-	if (is_ab9540(ab8500))
+	if (((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
+			ab8500->chip_id >= AB8500_CUT2P0) || is_ab8540(ab8500))
 		ret = sysfs_create_group(&ab8500->dev->kobj,
 					&ab9540_attr_group);
 	else
 		ret = sysfs_create_group(&ab8500->dev->kobj,
 					&ab8500_attr_group);
+
+	if ((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
+			ab8500->chip_id >= AB8500_CUT2P0)
+		ret = sysfs_create_group(&ab8500->dev->kobj,
+					 &ab8505_attr_group);
+
 	if (ret)
 		dev_err(ab8500->dev, "error creating sysfs entries\n");
 
@@ -1494,11 +1761,16 @@
 {
 	struct ab8500 *ab8500 = platform_get_drvdata(pdev);
 
-	if (is_ab9540(ab8500))
+	if (((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
+			ab8500->chip_id >= AB8500_CUT2P0) || is_ab8540(ab8500))
 		sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
 	else
 		sysfs_remove_group(&ab8500->dev->kobj, &ab8500_attr_group);
 
+	if ((is_ab8505(ab8500) || is_ab9540(ab8500)) &&
+			ab8500->chip_id >= AB8500_CUT2P0)
+		sysfs_remove_group(&ab8500->dev->kobj, &ab8505_attr_group);
+
 	mfd_remove_devices(ab8500->dev);
 
 	return 0;
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 45fe3c5..b88bbbc 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -80,6 +80,7 @@
 #include <linux/interrupt.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
+#include <linux/irq.h>
 
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
@@ -90,6 +91,9 @@
 #include <linux/ctype.h>
 #endif
 
+/* TODO: this file should not reference IRQ_DB8500_AB8500! */
+#include <mach/irqs.h>
+
 static u32 debug_bank;
 static u32 debug_address;
 
@@ -101,6 +105,11 @@
 static struct device_attribute **dev_attr;
 static char **event_name;
 
+static u8 avg_sample = SAMPLE_16;
+static u8 trig_edge = RISING_EDGE;
+static u8 conv_type = ADC_SW;
+static u8 trig_timer;
+
 /**
  * struct ab8500_reg_range
  * @first: the first address of the range
@@ -150,7 +159,9 @@
 
 #define AB8500_REV_REG 0x80
 
-static struct ab8500_prcmu_ranges debug_ranges[AB8500_NUM_BANKS] = {
+static struct ab8500_prcmu_ranges *debug_ranges;
+
+struct ab8500_prcmu_ranges ab8500_debug_ranges[AB8500_NUM_BANKS] = {
 	[0x0] = {
 		.num_ranges = 0,
 		.range = NULL,
@@ -354,7 +365,7 @@
 			},
 			{
 				.first = 0xf5,
-				.last =	0xf6,
+				.last = 0xf6,
 			},
 		},
 	},
@@ -479,6 +490,781 @@
 	},
 };
 
+struct ab8500_prcmu_ranges ab8505_debug_ranges[AB8500_NUM_BANKS] = {
+	[0x0] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_SYS_CTRL1_BLOCK] = {
+		.num_ranges = 5,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x04,
+			},
+			{
+				.first = 0x42,
+				.last = 0x42,
+			},
+			{
+				.first = 0x52,
+				.last = 0x52,
+			},
+			{
+				.first = 0x54,
+				.last = 0x57,
+			},
+			{
+				.first = 0x80,
+				.last = 0x83,
+			},
+		},
+	},
+	[AB8500_SYS_CTRL2_BLOCK] = {
+		.num_ranges = 5,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x0D,
+			},
+			{
+				.first = 0x0F,
+				.last = 0x17,
+			},
+			{
+				.first = 0x20,
+				.last = 0x20,
+			},
+			{
+				.first = 0x30,
+				.last = 0x30,
+			},
+			{
+				.first = 0x32,
+				.last = 0x3A,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL1] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x03,
+				.last = 0x11,
+			},
+			{
+				.first = 0x80,
+				.last = 0x86,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL2] = {
+		.num_ranges = 6,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x06,
+			},
+			{
+				.first = 0x08,
+				.last = 0x15,
+			},
+			{
+				.first = 0x17,
+				.last = 0x19,
+			},
+			{
+				.first = 0x1B,
+				.last = 0x1D,
+			},
+			{
+				.first = 0x1F,
+				.last = 0x30,
+			},
+			{
+				.first = 0x40,
+				.last = 0x48,
+			},
+			/* 0x80-0x8B are SIM registers and should
+			 * not be accessed from here */
+		},
+	},
+	[AB8500_USB] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x80,
+				.last = 0x83,
+			},
+			{
+				.first = 0x87,
+				.last = 0x8A,
+			},
+			{
+				.first = 0x91,
+				.last = 0x94,
+			},
+		},
+	},
+	[AB8500_TVOUT] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_DBI] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_ECI_AV_ACC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+		},
+	},
+	[AB8500_RESERVED] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_GPADC] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x08,
+			},
+		},
+	},
+	[AB8500_CHARGER] = {
+		.num_ranges = 9,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x02,
+				.last = 0x03,
+			},
+			{
+				.first = 0x05,
+				.last = 0x05,
+			},
+			{
+				.first = 0x40,
+				.last = 0x44,
+			},
+			{
+				.first = 0x50,
+				.last = 0x57,
+			},
+			{
+				.first = 0x60,
+				.last = 0x60,
+			},
+			{
+				.first = 0xA0,
+				.last = 0xA7,
+			},
+			{
+				.first = 0xAF,
+				.last = 0xB2,
+			},
+			{
+				.first = 0xC0,
+				.last = 0xC2,
+			},
+			{
+				.first = 0xF5,
+				.last = 0xF5,
+			},
+		},
+	},
+	[AB8500_GAS_GAUGE] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x07,
+				.last = 0x0A,
+			},
+			{
+				.first = 0x10,
+				.last = 0x14,
+			},
+		},
+	},
+	[AB8500_AUDIO] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x83,
+			},
+		},
+	},
+	[AB8500_INTERRUPT] = {
+		.num_ranges = 11,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x04,
+			},
+			{
+				.first = 0x06,
+				.last = 0x07,
+			},
+			{
+				.first = 0x09,
+				.last = 0x09,
+			},
+			{
+				.first = 0x0B,
+				.last = 0x0C,
+			},
+			{
+				.first = 0x12,
+				.last = 0x15,
+			},
+			{
+				.first = 0x18,
+				.last = 0x18,
+			},
+			/* Latch registers should not be read here */
+			{
+				.first = 0x40,
+				.last = 0x44,
+			},
+			{
+				.first = 0x46,
+				.last = 0x49,
+			},
+			{
+				.first = 0x4B,
+				.last = 0x4D,
+			},
+			{
+				.first = 0x52,
+				.last = 0x55,
+			},
+			{
+				.first = 0x58,
+				.last = 0x58,
+			},
+			/* LatchHier registers should not be read here */
+		},
+	},
+	[AB8500_RTC] = {
+		.num_ranges = 2,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x14,
+			},
+			{
+				.first = 0x16,
+				.last = 0x17,
+			},
+		},
+	},
+	[AB8500_MISC] = {
+		.num_ranges = 8,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x06,
+			},
+			{
+				.first = 0x10,
+				.last = 0x16,
+			},
+			{
+				.first = 0x20,
+				.last = 0x26,
+			},
+			{
+				.first = 0x30,
+				.last = 0x36,
+			},
+			{
+				.first = 0x40,
+				.last = 0x46,
+			},
+			{
+				.first = 0x50,
+				.last = 0x50,
+			},
+			{
+				.first = 0x60,
+				.last = 0x6B,
+			},
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+		},
+	},
+	[AB8500_DEVELOPMENT] = {
+		.num_ranges = 2,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x05,
+				.last = 0x05,
+			},
+		},
+	},
+	[AB8500_DEBUG] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x05,
+				.last = 0x07,
+			},
+		},
+	},
+	[AB8500_PROD_TEST] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_STE_TEST] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_OTP_EMUL] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x01,
+				.last = 0x15,
+			},
+		},
+	},
+};
+
+struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
+	[AB8500_M_FSM_RANK] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x0B,
+			},
+		},
+	},
+	[AB8500_SYS_CTRL1_BLOCK] = {
+		.num_ranges = 6,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x04,
+			},
+			{
+				.first = 0x42,
+				.last = 0x42,
+			},
+			{
+				.first = 0x50,
+				.last = 0x54,
+			},
+			{
+				.first = 0x57,
+				.last = 0x57,
+			},
+			{
+				.first = 0x80,
+				.last = 0x83,
+			},
+			{
+				.first = 0x90,
+				.last = 0x90,
+			},
+		},
+	},
+	[AB8500_SYS_CTRL2_BLOCK] = {
+		.num_ranges = 5,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x0D,
+			},
+			{
+				.first = 0x0F,
+				.last = 0x10,
+			},
+			{
+				.first = 0x20,
+				.last = 0x21,
+			},
+			{
+				.first = 0x32,
+				.last = 0x3C,
+			},
+			{
+				.first = 0x40,
+				.last = 0x42,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL1] = {
+		.num_ranges = 4,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x03,
+				.last = 0x15,
+			},
+			{
+				.first = 0x20,
+				.last = 0x20,
+			},
+			{
+				.first = 0x80,
+				.last = 0x85,
+			},
+			{
+				.first = 0x87,
+				.last = 0x88,
+			},
+		},
+	},
+	[AB8500_REGU_CTRL2] = {
+		.num_ranges = 8,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x06,
+			},
+			{
+				.first = 0x08,
+				.last = 0x15,
+			},
+			{
+				.first = 0x17,
+				.last = 0x19,
+			},
+			{
+				.first = 0x1B,
+				.last = 0x1D,
+			},
+			{
+				.first = 0x1F,
+				.last = 0x2F,
+			},
+			{
+				.first = 0x31,
+				.last = 0x3A,
+			},
+			{
+				.first = 0x43,
+				.last = 0x44,
+			},
+			{
+				.first = 0x48,
+				.last = 0x49,
+			},
+		},
+	},
+	[AB8500_USB] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x80,
+				.last = 0x83,
+			},
+			{
+				.first = 0x87,
+				.last = 0x8A,
+			},
+			{
+				.first = 0x91,
+				.last = 0x94,
+			},
+		},
+	},
+	[AB8500_TVOUT] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_DBI] = {
+		.num_ranges = 4,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x07,
+			},
+			{
+				.first = 0x10,
+				.last = 0x11,
+			},
+			{
+				.first = 0x20,
+				.last = 0x21,
+			},
+			{
+				.first = 0x30,
+				.last = 0x43,
+			},
+		},
+	},
+	[AB8500_ECI_AV_ACC] = {
+		.num_ranges = 2,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x03,
+			},
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+		},
+	},
+	[AB8500_RESERVED] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_GPADC] = {
+		.num_ranges = 4,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x01,
+			},
+			{
+				.first = 0x04,
+				.last = 0x06,
+			},
+			{
+				.first = 0x09,
+				.last = 0x0A,
+			},
+			{
+				.first = 0x10,
+				.last = 0x14,
+			},
+		},
+	},
+	[AB8500_CHARGER] = {
+		.num_ranges = 10,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x02,
+				.last = 0x05,
+			},
+			{
+				.first = 0x40,
+				.last = 0x44,
+			},
+			{
+				.first = 0x50,
+				.last = 0x57,
+			},
+			{
+				.first = 0x60,
+				.last = 0x60,
+			},
+			{
+				.first = 0x70,
+				.last = 0x70,
+			},
+			{
+				.first = 0xA0,
+				.last = 0xA9,
+			},
+			{
+				.first = 0xAF,
+				.last = 0xB2,
+			},
+			{
+				.first = 0xC0,
+				.last = 0xC6,
+			},
+			{
+				.first = 0xF5,
+				.last = 0xF5,
+			},
+		},
+	},
+	[AB8500_GAS_GAUGE] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x00,
+			},
+			{
+				.first = 0x07,
+				.last = 0x0A,
+			},
+			{
+				.first = 0x10,
+				.last = 0x14,
+			},
+		},
+	},
+	[AB8500_AUDIO] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x9f,
+			},
+		},
+	},
+	[AB8500_INTERRUPT] = {
+		.num_ranges = 6,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x05,
+			},
+			{
+				.first = 0x0B,
+				.last = 0x0D,
+			},
+			{
+				.first = 0x12,
+				.last = 0x20,
+			},
+			/* Latch registers should not be read here */
+			{
+				.first = 0x40,
+				.last = 0x45,
+			},
+			{
+				.first = 0x4B,
+				.last = 0x4D,
+			},
+			{
+				.first = 0x52,
+				.last = 0x60,
+			},
+			/* LatchHier registers should not be read here */
+		},
+	},
+	[AB8500_RTC] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x07,
+			},
+			{
+				.first = 0x0B,
+				.last = 0x18,
+			},
+			{
+				.first = 0x20,
+				.last = 0x25,
+			},
+		},
+	},
+	[AB8500_MISC] = {
+		.num_ranges = 9,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x06,
+			},
+			{
+				.first = 0x10,
+				.last = 0x16,
+			},
+			{
+				.first = 0x20,
+				.last = 0x26,
+			},
+			{
+				.first = 0x30,
+				.last = 0x36,
+			},
+			{
+				.first = 0x40,
+				.last = 0x49,
+			},
+			{
+				.first = 0x50,
+				.last = 0x50,
+			},
+			{
+				.first = 0x60,
+				.last = 0x6B,
+			},
+			{
+				.first = 0x70,
+				.last = 0x74,
+			},
+			{
+				.first = 0x80,
+				.last = 0x82,
+			},
+		},
+	},
+	[AB8500_DEVELOPMENT] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x01,
+			},
+			{
+				.first = 0x06,
+				.last = 0x06,
+			},
+			{
+				.first = 0x10,
+				.last = 0x21,
+			},
+		},
+	},
+	[AB8500_DEBUG] = {
+		.num_ranges = 3,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x01,
+				.last = 0x0C,
+			},
+			{
+				.first = 0x0E,
+				.last = 0x11,
+			},
+			{
+				.first = 0x80,
+				.last = 0x81,
+			},
+		},
+	},
+	[AB8500_PROD_TEST] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_STE_TEST] = {
+		.num_ranges = 0,
+		.range = NULL,
+	},
+	[AB8500_OTP_EMUL] = {
+		.num_ranges = 1,
+		.range = (struct ab8500_reg_range[]) {
+			{
+				.first = 0x00,
+				.last = 0x3F,
+			},
+		},
+	},
+};
+
 static irqreturn_t ab8500_debug_handler(int irq, void *data)
 {
 	char buf[16];
@@ -520,19 +1306,16 @@
 			}
 
 			if (s) {
-				err = seq_printf(s, "  [%u/0x%02X]: 0x%02X\n",
+				err = seq_printf(s, "  [0x%02X/0x%02X]: 0x%02X\n",
 					bank, reg, value);
 				if (err < 0) {
-					dev_err(dev,
-					"seq_printf overflow bank=%d reg=%d\n",
-						bank, reg);
 					/* Error is not returned here since
 					 * the output is wanted in any case */
 					return 0;
 				}
 			} else {
-				printk(KERN_INFO" [%u/0x%02X]: 0x%02X\n", bank,
-					reg, value);
+				printk(KERN_INFO" [0x%02X/0x%02X]: 0x%02X\n",
+					bank, reg, value);
 			}
 		}
 	}
@@ -546,7 +1329,7 @@
 
 	seq_printf(s, AB8500_NAME_STRING " register values:\n");
 
-	seq_printf(s, " bank %u:\n", bank);
+	seq_printf(s, " bank 0x%02X:\n", bank);
 
 	ab8500_registers_print(dev, bank, s);
 	return 0;
@@ -573,10 +1356,8 @@
 
 	seq_printf(s, AB8500_NAME_STRING " register values:\n");
 
-	for (i = 1; i < AB8500_NUM_BANKS; i++) {
-		err = seq_printf(s, " bank %u:\n", i);
-		if (err < 0)
-			dev_err(dev, "seq_printf overflow, bank=%d\n", i);
+	for (i = 0; i < AB8500_NUM_BANKS; i++) {
+		err = seq_printf(s, " bank 0x%02X:\n", i);
 
 		ab8500_registers_print(dev, i, s);
 	}
@@ -591,11 +1372,68 @@
 	printk(KERN_INFO"ab8500 register values:\n");
 
 	for (i = 1; i < AB8500_NUM_BANKS; i++) {
-		printk(KERN_INFO" bank %u:\n", i);
+		printk(KERN_INFO" bank 0x%02X:\n", i);
 		ab8500_registers_print(dev, i, NULL);
 	}
 }
 
+/* Space for up to 700 registers. */
+#define DUMP_MAX_REGS 700
+struct ab8500_register_dump {
+	u8 bank;
+	u8 reg;
+	u8 value;
+} ab8500_complete_register_dump[DUMP_MAX_REGS];
+
+extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
+
+/* This shall only be called upon kernel panic! */
+void ab8500_dump_all_banks_to_mem(void)
+{
+	int i, r = 0;
+	u8 bank;
+	int err = 0;
+
+	pr_info("Saving all ABB registers at \"ab8500_complete_register_dump\" "
+		"for crash analysis.\n");
+
+	for (bank = 0; bank < AB8500_NUM_BANKS; bank++) {
+		for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
+			u8 reg;
+
+			for (reg = debug_ranges[bank].range[i].first;
+			     reg <= debug_ranges[bank].range[i].last;
+			     reg++) {
+				u8 value;
+
+				err = prcmu_abb_read(bank, reg, &value, 1);
+
+				if (err < 0)
+					goto out;
+
+				ab8500_complete_register_dump[r].bank = bank;
+				ab8500_complete_register_dump[r].reg = reg;
+				ab8500_complete_register_dump[r].value = value;
+
+				r++;
+
+				if (r >= DUMP_MAX_REGS) {
+					pr_err("%s: too many registers to dump!\n",
+						__func__);
+					err = -EINVAL;
+					goto out;
+				}
+			}
+		}
+	}
+out:
+	if (err >= 0)
+		pr_info("Saved all ABB registers.\n");
+	else
+		pr_info("Failed to save all ABB registers.\n");
+}
+
 static int ab8500_all_banks_open(struct inode *inode, struct file *file)
 {
 	struct seq_file *s;
@@ -625,7 +1463,7 @@
 
 static int ab8500_bank_print(struct seq_file *s, void *p)
 {
-	return seq_printf(s, "%d\n", debug_bank);
+	return seq_printf(s, "0x%02X\n", debug_bank);
 }
 
 static int ab8500_bank_open(struct inode *inode, struct file *file)
@@ -641,7 +1479,6 @@
 	unsigned long user_bank;
 	int err;
 
-	/* Get userspace string and assure termination */
 	err = kstrtoul_from_user(user_buf, count, 0, &user_bank);
 	if (err)
 		return err;
@@ -667,14 +1504,13 @@
 }
 
 static ssize_t ab8500_address_write(struct file *file,
-	const char __user *user_buf,
-	size_t count, loff_t *ppos)
+				    const char __user *user_buf,
+				    size_t count, loff_t *ppos)
 {
 	struct device *dev = ((struct seq_file *)(file->private_data))->private;
 	unsigned long user_address;
 	int err;
 
-	/* Get userspace string and assure termination */
 	err = kstrtoul_from_user(user_buf, count, 0, &user_address);
 	if (err)
 		return err;
@@ -684,6 +1520,7 @@
 		return -EINVAL;
 	}
 	debug_address = user_address;
+
 	return count;
 }
 
@@ -711,14 +1548,13 @@
 }
 
 static ssize_t ab8500_val_write(struct file *file,
-	const char __user *user_buf,
-	size_t count, loff_t *ppos)
+				const char __user *user_buf,
+				size_t count, loff_t *ppos)
 {
 	struct device *dev = ((struct seq_file *)(file->private_data))->private;
 	unsigned long user_val;
 	int err;
 
-	/* Get userspace string and assure termination */
 	err = kstrtoul_from_user(user_buf, count, 0, &user_val);
 	if (err)
 		return err;
@@ -741,22 +1577,46 @@
  * Interrupt status
  */
 static u32 num_interrupts[AB8500_MAX_NR_IRQS];
+static u32 num_wake_interrupts[AB8500_MAX_NR_IRQS];
 static int num_interrupt_lines;
 
+bool __attribute__((weak)) suspend_test_wake_cause_interrupt_is_mine(u32 my_int)
+{
+	return false;
+}
+
 void ab8500_debug_register_interrupt(int line)
 {
-	if (line < num_interrupt_lines)
+	if (line < num_interrupt_lines) {
 		num_interrupts[line]++;
+		if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500))
+			num_wake_interrupts[line]++;
+	}
 }
 
 static int ab8500_interrupts_print(struct seq_file *s, void *p)
 {
 	int line;
 
-	seq_printf(s, "irq:  number of\n");
+	seq_printf(s, "irq:  count:  wake:  name:  action:\n");
 
-	for (line = 0; line < num_interrupt_lines; line++)
-		seq_printf(s, "%3i:  %6i\n", line, num_interrupts[line]);
+	for (line = 0; line < num_interrupt_lines; line++) {
+		struct irq_desc *desc = irq_to_desc(line + irq_first);
+		struct irqaction *action = desc ? desc->action : NULL;
+
+		seq_printf(s, "%3i:  %6i %4i", line,
+			   num_interrupts[line],
+			   num_wake_interrupts[line]);
+
+		if (desc && desc->name)
+			seq_printf(s, "-%-8s", desc->name);
+		if (action) {
+			seq_printf(s, "  %s", action->name);
+			while ((action = action->next) != NULL)
+				seq_printf(s, ", %s", action->name);
+		}
+		seq_putc(s, '\n');
+	}
 
 	return 0;
 }
@@ -801,6 +1661,79 @@
 	return single_open(file, ab8500_hwreg_print, inode->i_private);
 }
 
+#define AB8500_SUPPLY_CONTROL_CONFIG_1 0x01
+#define AB8500_SUPPLY_CONTROL_REG 0x00
+#define AB8500_FIRST_SIM_REG 0x80
+#define AB8500_LAST_SIM_REG 0x8B
+#define AB8505_LAST_SIM_REG 0x8C
+
+static int ab8500_print_modem_registers(struct seq_file *s, void *p)
+{
+	struct device *dev = s->private;
+	struct ab8500 *ab8500;
+	int err;
+	u8 value;
+	u8 orig_value;
+	u32 bank = AB8500_REGU_CTRL2;
+	u32 last_sim_reg = AB8500_LAST_SIM_REG;
+	u32 reg;
+
+	ab8500 = dev_get_drvdata(dev->parent);
+	dev_warn(dev, "WARNING! This operation can interfere with the modem side\n"
+		"and should only be done with care\n");
+
+	err = abx500_get_register_interruptible(dev,
+		AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, &orig_value);
+	if (err < 0) {
+		dev_err(dev, "ab->read fail %d\n", err);
+		return err;
+	}
+	/* Config 1 will allow APE side to read SIM registers */
+	err = abx500_set_register_interruptible(dev,
+		AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG,
+		AB8500_SUPPLY_CONTROL_CONFIG_1);
+	if (err < 0) {
+		dev_err(dev, "ab->write fail %d\n", err);
+		return err;
+	}
+
+	seq_printf(s, " bank 0x%02X:\n", bank);
+
+	if (is_ab9540(ab8500) || is_ab8505(ab8500))
+		last_sim_reg = AB8505_LAST_SIM_REG;
+
+	for (reg = AB8500_FIRST_SIM_REG; reg <= last_sim_reg; reg++) {
+		err = abx500_get_register_interruptible(dev,
+			bank, reg, &value);
+		if (err < 0) {
+			dev_err(dev, "ab->read fail %d\n", err);
+			return err;
+		}
+		err = seq_printf(s, "  [0x%02X/0x%02X]: 0x%02X\n",
+			bank, reg, value);
+	}
+	err = abx500_set_register_interruptible(dev,
+		AB8500_REGU_CTRL1, AB8500_SUPPLY_CONTROL_REG, orig_value);
+	if (err < 0) {
+		dev_err(dev, "ab->write fail %d\n", err);
+		return err;
+	}
+	return 0;
+}
+
+static int ab8500_modem_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8500_print_modem_registers, inode->i_private);
+}
+
+static const struct file_operations ab8500_modem_fops = {
+	.open = ab8500_modem_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
 static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
 {
 	int bat_ctrl_raw;
@@ -808,12 +1741,13 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL);
+	bat_ctrl_raw = ab8500_gpadc_read_raw(gpadc, BAT_CTRL,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc,
-			BAT_CTRL, bat_ctrl_raw);
+		BAT_CTRL, bat_ctrl_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			bat_ctrl_convert, bat_ctrl_raw);
+		bat_ctrl_convert, bat_ctrl_raw);
 }
 
 static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
@@ -836,16 +1770,17 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL);
+	btemp_ball_raw = ab8500_gpadc_read_raw(gpadc, BTEMP_BALL,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	btemp_ball_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
-			btemp_ball_raw);
+		btemp_ball_raw);
 
 	return seq_printf(s,
-			"%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
+		"%d,0x%X\n", btemp_ball_convert, btemp_ball_raw);
 }
 
 static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
-		struct file *file)
+					struct file *file)
 {
 	return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private);
 }
@@ -865,19 +1800,20 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V);
+	main_charger_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_V,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	main_charger_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
-			MAIN_CHARGER_V, main_charger_v_raw);
+		MAIN_CHARGER_V, main_charger_v_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
 			main_charger_v_convert, main_charger_v_raw);
 }
 
 static int ab8500_gpadc_main_charger_v_open(struct inode *inode,
-		struct file *file)
+					    struct file *file)
 {
 	return single_open(file, ab8500_gpadc_main_charger_v_print,
-			inode->i_private);
+		inode->i_private);
 }
 
 static const struct file_operations ab8500_gpadc_main_charger_v_fops = {
@@ -895,19 +1831,20 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1);
+	acc_detect1_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT1,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	acc_detect1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ACC_DETECT1,
-			acc_detect1_raw);
+		acc_detect1_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			acc_detect1_convert, acc_detect1_raw);
+		acc_detect1_convert, acc_detect1_raw);
 }
 
 static int ab8500_gpadc_acc_detect1_open(struct inode *inode,
-		struct file *file)
+					 struct file *file)
 {
 	return single_open(file, ab8500_gpadc_acc_detect1_print,
-			inode->i_private);
+		inode->i_private);
 }
 
 static const struct file_operations ab8500_gpadc_acc_detect1_fops = {
@@ -925,19 +1862,20 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2);
+	acc_detect2_raw = ab8500_gpadc_read_raw(gpadc, ACC_DETECT2,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	acc_detect2_convert = ab8500_gpadc_ad_to_voltage(gpadc,
-	    ACC_DETECT2, acc_detect2_raw);
+		ACC_DETECT2, acc_detect2_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			acc_detect2_convert, acc_detect2_raw);
+		acc_detect2_convert, acc_detect2_raw);
 }
 
 static int ab8500_gpadc_acc_detect2_open(struct inode *inode,
 		struct file *file)
 {
 	return single_open(file, ab8500_gpadc_acc_detect2_print,
-	    inode->i_private);
+		inode->i_private);
 }
 
 static const struct file_operations ab8500_gpadc_acc_detect2_fops = {
@@ -955,12 +1893,13 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1);
+	aux1_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX1,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	aux1_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX1,
-			aux1_raw);
+		aux1_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			aux1_convert, aux1_raw);
+		aux1_convert, aux1_raw);
 }
 
 static int ab8500_gpadc_aux1_open(struct inode *inode, struct file *file)
@@ -983,9 +1922,10 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2);
+	aux2_raw = ab8500_gpadc_read_raw(gpadc, ADC_AUX2,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	aux2_convert = ab8500_gpadc_ad_to_voltage(gpadc, ADC_AUX2,
-			aux2_raw);
+		aux2_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
 			aux2_convert, aux2_raw);
@@ -1011,16 +1951,17 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V);
+	main_bat_v_raw = ab8500_gpadc_read_raw(gpadc, MAIN_BAT_V,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	main_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
-			main_bat_v_raw);
+		main_bat_v_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			main_bat_v_convert, main_bat_v_raw);
+		main_bat_v_convert, main_bat_v_raw);
 }
 
 static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
-		struct file *file)
+					struct file *file)
 {
 	return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private);
 }
@@ -1040,12 +1981,13 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V);
+	vbus_v_raw = ab8500_gpadc_read_raw(gpadc, VBUS_V,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	vbus_v_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBUS_V,
-			vbus_v_raw);
+		vbus_v_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			vbus_v_convert, vbus_v_raw);
+		vbus_v_convert, vbus_v_raw);
 }
 
 static int ab8500_gpadc_vbus_v_open(struct inode *inode, struct file *file)
@@ -1068,19 +2010,20 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C);
+	main_charger_c_raw = ab8500_gpadc_read_raw(gpadc, MAIN_CHARGER_C,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	main_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
-			MAIN_CHARGER_C, main_charger_c_raw);
+		MAIN_CHARGER_C, main_charger_c_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			main_charger_c_convert, main_charger_c_raw);
+		main_charger_c_convert, main_charger_c_raw);
 }
 
 static int ab8500_gpadc_main_charger_c_open(struct inode *inode,
 		struct file *file)
 {
 	return single_open(file, ab8500_gpadc_main_charger_c_print,
-			inode->i_private);
+		inode->i_private);
 }
 
 static const struct file_operations ab8500_gpadc_main_charger_c_fops = {
@@ -1098,19 +2041,20 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C);
+	usb_charger_c_raw = ab8500_gpadc_read_raw(gpadc, USB_CHARGER_C,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	usb_charger_c_convert = ab8500_gpadc_ad_to_voltage(gpadc,
-	    USB_CHARGER_C, usb_charger_c_raw);
+		USB_CHARGER_C, usb_charger_c_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			usb_charger_c_convert, usb_charger_c_raw);
+		usb_charger_c_convert, usb_charger_c_raw);
 }
 
 static int ab8500_gpadc_usb_charger_c_open(struct inode *inode,
 		struct file *file)
 {
 	return single_open(file, ab8500_gpadc_usb_charger_c_print,
-	    inode->i_private);
+		inode->i_private);
 }
 
 static const struct file_operations ab8500_gpadc_usb_charger_c_fops = {
@@ -1128,12 +2072,13 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V);
+	bk_bat_v_raw = ab8500_gpadc_read_raw(gpadc, BK_BAT_V,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	bk_bat_v_convert = ab8500_gpadc_ad_to_voltage(gpadc,
-			BK_BAT_V, bk_bat_v_raw);
+		BK_BAT_V, bk_bat_v_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			bk_bat_v_convert, bk_bat_v_raw);
+		bk_bat_v_convert, bk_bat_v_raw);
 }
 
 static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
@@ -1156,12 +2101,13 @@
 	struct ab8500_gpadc *gpadc;
 
 	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
-	die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP);
+	die_temp_raw = ab8500_gpadc_read_raw(gpadc, DIE_TEMP,
+		avg_sample, trig_edge, trig_timer, conv_type);
 	die_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, DIE_TEMP,
-			die_temp_raw);
+		die_temp_raw);
 
 	return seq_printf(s, "%d,0x%X\n",
-			die_temp_convert, die_temp_raw);
+		die_temp_convert, die_temp_raw);
 }
 
 static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
@@ -1177,6 +2123,453 @@
 	.owner = THIS_MODULE,
 };
 
+static int ab8500_gpadc_usb_id_print(struct seq_file *s, void *p)
+{
+	int usb_id_raw;
+	int usb_id_convert;
+	struct ab8500_gpadc *gpadc;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	usb_id_raw = ab8500_gpadc_read_raw(gpadc, USB_ID,
+		avg_sample, trig_edge, trig_timer, conv_type);
+	usb_id_convert = ab8500_gpadc_ad_to_voltage(gpadc, USB_ID,
+		usb_id_raw);
+
+	return seq_printf(s, "%d,0x%X\n",
+		usb_id_convert, usb_id_raw);
+}
+
+static int ab8500_gpadc_usb_id_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8500_gpadc_usb_id_print, inode->i_private);
+}
+
+static const struct file_operations ab8500_gpadc_usb_id_fops = {
+	.open = ab8500_gpadc_usb_id_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8540_gpadc_xtal_temp_print(struct seq_file *s, void *p)
+{
+	int xtal_temp_raw;
+	int xtal_temp_convert;
+	struct ab8500_gpadc *gpadc;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	xtal_temp_raw = ab8500_gpadc_read_raw(gpadc, XTAL_TEMP,
+		avg_sample, trig_edge, trig_timer, conv_type);
+	xtal_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, XTAL_TEMP,
+		xtal_temp_raw);
+
+	return seq_printf(s, "%d,0x%X\n",
+		xtal_temp_convert, xtal_temp_raw);
+}
+
+static int ab8540_gpadc_xtal_temp_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8540_gpadc_xtal_temp_print,
+		inode->i_private);
+}
+
+static const struct file_operations ab8540_gpadc_xtal_temp_fops = {
+	.open = ab8540_gpadc_xtal_temp_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
+{
+	int vbat_true_meas_raw;
+	int vbat_true_meas_convert;
+	struct ab8500_gpadc *gpadc;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	vbat_true_meas_raw = ab8500_gpadc_read_raw(gpadc, VBAT_TRUE_MEAS,
+		avg_sample, trig_edge, trig_timer, conv_type);
+	vbat_true_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS,
+		vbat_true_meas_raw);
+
+	return seq_printf(s, "%d,0x%X\n",
+		vbat_true_meas_convert, vbat_true_meas_raw);
+}
+
+static int ab8540_gpadc_vbat_true_meas_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, ab8540_gpadc_vbat_true_meas_print,
+		inode->i_private);
+}
+
+static const struct file_operations ab8540_gpadc_vbat_true_meas_fops = {
+	.open = ab8540_gpadc_vbat_true_meas_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8540_gpadc_bat_ctrl_and_ibat_print(struct seq_file *s, void *p)
+{
+	int bat_ctrl_raw;
+	int bat_ctrl_convert;
+	int ibat_raw;
+	int ibat_convert;
+	struct ab8500_gpadc *gpadc;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	bat_ctrl_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_CTRL_AND_IBAT,
+		avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
+
+	bat_ctrl_convert = ab8500_gpadc_ad_to_voltage(gpadc, BAT_CTRL,
+		bat_ctrl_raw);
+	ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
+		ibat_raw);
+
+	return seq_printf(s, "%d,0x%X\n"  "%d,0x%X\n",
+		bat_ctrl_convert, bat_ctrl_raw,
+		ibat_convert, ibat_raw);
+}
+
+static int ab8540_gpadc_bat_ctrl_and_ibat_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, ab8540_gpadc_bat_ctrl_and_ibat_print,
+		inode->i_private);
+}
+
+static const struct file_operations ab8540_gpadc_bat_ctrl_and_ibat_fops = {
+	.open = ab8540_gpadc_bat_ctrl_and_ibat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8540_gpadc_vbat_meas_and_ibat_print(struct seq_file *s, void *p)
+{
+	int vbat_meas_raw;
+	int vbat_meas_convert;
+	int ibat_raw;
+	int ibat_convert;
+	struct ab8500_gpadc *gpadc;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	vbat_meas_raw = ab8500_gpadc_double_read_raw(gpadc, VBAT_MEAS_AND_IBAT,
+		avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
+	vbat_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc, MAIN_BAT_V,
+		vbat_meas_raw);
+	ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
+		ibat_raw);
+
+	return seq_printf(s, "%d,0x%X\n"  "%d,0x%X\n",
+		vbat_meas_convert, vbat_meas_raw,
+		ibat_convert, ibat_raw);
+}
+
+static int ab8540_gpadc_vbat_meas_and_ibat_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, ab8540_gpadc_vbat_meas_and_ibat_print,
+		inode->i_private);
+}
+
+static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
+	.open = ab8540_gpadc_vbat_meas_and_ibat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s, void *p)
+{
+	int vbat_true_meas_raw;
+	int vbat_true_meas_convert;
+	int ibat_raw;
+	int ibat_convert;
+	struct ab8500_gpadc *gpadc;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	vbat_true_meas_raw = ab8500_gpadc_double_read_raw(gpadc,
+			VBAT_TRUE_MEAS_AND_IBAT, avg_sample, trig_edge,
+			trig_timer, conv_type, &ibat_raw);
+	vbat_true_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc,
+			VBAT_TRUE_MEAS, vbat_true_meas_raw);
+	ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
+		ibat_raw);
+
+	return seq_printf(s, "%d,0x%X\n"  "%d,0x%X\n",
+		vbat_true_meas_convert, vbat_true_meas_raw,
+		ibat_convert, ibat_raw);
+}
+
+static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, ab8540_gpadc_vbat_true_meas_and_ibat_print,
+		inode->i_private);
+}
+
+static const struct file_operations ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
+	.open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8540_gpadc_bat_temp_and_ibat_print(struct seq_file *s, void *p)
+{
+	int bat_temp_raw;
+	int bat_temp_convert;
+	int ibat_raw;
+	int ibat_convert;
+	struct ab8500_gpadc *gpadc;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	bat_temp_raw = ab8500_gpadc_double_read_raw(gpadc, BAT_TEMP_AND_IBAT,
+		avg_sample, trig_edge, trig_timer, conv_type, &ibat_raw);
+	bat_temp_convert = ab8500_gpadc_ad_to_voltage(gpadc, BTEMP_BALL,
+		bat_temp_raw);
+	ibat_convert = ab8500_gpadc_ad_to_voltage(gpadc, IBAT_VIRTUAL_CHANNEL,
+		ibat_raw);
+
+	return seq_printf(s, "%d,0x%X\n"  "%d,0x%X\n",
+		bat_temp_convert, bat_temp_raw,
+		ibat_convert, ibat_raw);
+}
+
+static int ab8540_gpadc_bat_temp_and_ibat_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, ab8540_gpadc_bat_temp_and_ibat_print,
+		inode->i_private);
+}
+
+static const struct file_operations ab8540_gpadc_bat_temp_and_ibat_fops = {
+	.open = ab8540_gpadc_bat_temp_and_ibat_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
+{
+	struct ab8500_gpadc *gpadc;
+	u16 vmain_l, vmain_h, btemp_l, btemp_h;
+	u16 vbat_l, vbat_h, ibat_l, ibat_h;
+
+	gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
+	ab8540_gpadc_get_otp(gpadc, &vmain_l, &vmain_h, &btemp_l, &btemp_h,
+			&vbat_l, &vbat_h, &ibat_l, &ibat_h);
+	return seq_printf(s, "VMAIN_L:0x%X\n"
+		"VMAIN_H:0x%X\n"
+		"BTEMP_L:0x%X\n"
+		"BTEMP_H:0x%X\n"
+		"VBAT_L:0x%X\n"
+		"VBAT_H:0x%X\n"
+		"IBAT_L:0x%X\n"
+		"IBAT_H:0x%X\n",
+		vmain_l, vmain_h, btemp_l, btemp_h, vbat_l, vbat_h, ibat_l, ibat_h);
+}
+
+static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8540_gpadc_otp_cal_print, inode->i_private);
+}
+
+static const struct file_operations ab8540_gpadc_otp_calib_fops = {
+	.open = ab8540_gpadc_otp_cal_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_avg_sample_print(struct seq_file *s, void *p)
+{
+	return seq_printf(s, "%d\n", avg_sample);
+}
+
+static int ab8500_gpadc_avg_sample_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8500_gpadc_avg_sample_print,
+		inode->i_private);
+}
+
+static ssize_t ab8500_gpadc_avg_sample_write(struct file *file,
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	unsigned long user_avg_sample;
+	int err;
+
+	err = kstrtoul_from_user(user_buf, count, 0, &user_avg_sample);
+	if (err)
+		return err;
+
+	if ((user_avg_sample == SAMPLE_1) || (user_avg_sample == SAMPLE_4)
+			|| (user_avg_sample == SAMPLE_8)
+			|| (user_avg_sample == SAMPLE_16)) {
+		avg_sample = (u8) user_avg_sample;
+	} else {
+		dev_err(dev, "debugfs error input: "
+			"should be equal to 1, 4, 8 or 16\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations ab8500_gpadc_avg_sample_fops = {
+	.open = ab8500_gpadc_avg_sample_open,
+	.read = seq_read,
+	.write = ab8500_gpadc_avg_sample_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_trig_edge_print(struct seq_file *s, void *p)
+{
+	return seq_printf(s, "%d\n", trig_edge);
+}
+
+static int ab8500_gpadc_trig_edge_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8500_gpadc_trig_edge_print,
+		inode->i_private);
+}
+
+static ssize_t ab8500_gpadc_trig_edge_write(struct file *file,
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	unsigned long user_trig_edge;
+	int err;
+
+	err = kstrtoul_from_user(user_buf, count, 0, &user_trig_edge);
+	if (err)
+		return err;
+
+	if ((user_trig_edge == RISING_EDGE)
+			|| (user_trig_edge == FALLING_EDGE)) {
+		trig_edge = (u8) user_trig_edge;
+	} else {
+		dev_err(dev, "Wrong input:\n"
+			"Enter 0. Rising edge\n"
+			"Enter 1. Falling edge\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations ab8500_gpadc_trig_edge_fops = {
+	.open = ab8500_gpadc_trig_edge_open,
+	.read = seq_read,
+	.write = ab8500_gpadc_trig_edge_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_trig_timer_print(struct seq_file *s, void *p)
+{
+	return seq_printf(s, "%d\n", trig_timer);
+}
+
+static int ab8500_gpadc_trig_timer_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8500_gpadc_trig_timer_print,
+		inode->i_private);
+}
+
+static ssize_t ab8500_gpadc_trig_timer_write(struct file *file,
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	unsigned long user_trig_timer;
+	int err;
+
+	err = kstrtoul_from_user(user_buf, count, 0, &user_trig_timer);
+	if (err)
+		return err;
+
+	if (user_trig_timer <= 255) {
+		trig_timer = (u8) user_trig_timer;
+	} else {
+		dev_err(dev, "debugfs error input: "
+			"should be between 0 and 255\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations ab8500_gpadc_trig_timer_fops = {
+	.open = ab8500_gpadc_trig_timer_open,
+	.read = seq_read,
+	.write = ab8500_gpadc_trig_timer_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
+static int ab8500_gpadc_conv_type_print(struct seq_file *s, void *p)
+{
+	return seq_printf(s, "%d\n", conv_type);
+}
+
+static int ab8500_gpadc_conv_type_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ab8500_gpadc_conv_type_print,
+		inode->i_private);
+}
+
+static ssize_t ab8500_gpadc_conv_type_write(struct file *file,
+	const char __user *user_buf,
+	size_t count, loff_t *ppos)
+{
+	struct device *dev = ((struct seq_file *)(file->private_data))->private;
+	unsigned long user_conv_type;
+	int err;
+
+	err = kstrtoul_from_user(user_buf, count, 0, &user_conv_type);
+	if (err)
+		return err;
+
+	if ((user_conv_type == ADC_SW)
+			|| (user_conv_type == ADC_HW)) {
+		conv_type = (u8) user_conv_type;
+	} else {
+		dev_err(dev, "Wrong input:\n"
+			"Enter 0. ADC SW conversion\n"
+			"Enter 1. ADC HW conversion\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations ab8500_gpadc_conv_type_fops = {
+	.open = ab8500_gpadc_conv_type_open,
+	.read = seq_read,
+	.write = ab8500_gpadc_conv_type_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
 /*
  * return length of an ASCII numerical value, 0 is string is not a
  * numerical value.
@@ -1352,7 +2745,7 @@
 					     struct file *file)
 {
 	return single_open(file, ab8500_subscribe_unsubscribe_print,
-			   inode->i_private);
+		inode->i_private);
 }
 
 /*
@@ -1382,21 +2775,14 @@
 				      size_t count, loff_t *ppos)
 {
 	struct device *dev = ((struct seq_file *)(file->private_data))->private;
-	char buf[32];
-	int buf_size;
 	unsigned long user_val;
 	int err;
 	unsigned int irq_index;
 
-	/* Get userspace string and assure termination */
-	buf_size = min(count, (sizeof(buf)-1));
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	buf[buf_size] = 0;
-
-	err = strict_strtoul(buf, 0, &user_val);
+	err = kstrtoul_from_user(user_buf, count, 0, &user_val);
 	if (err)
-		return -EINVAL;
+		return err;
+
 	if (user_val < irq_first) {
 		dev_err(dev, "debugfs error input < %d\n", irq_first);
 		return -EINVAL;
@@ -1416,7 +2802,7 @@
 	 */
 	dev_attr[irq_index] = kmalloc(sizeof(struct device_attribute),
 		GFP_KERNEL);
-	event_name[irq_index] = kmalloc(buf_size, GFP_KERNEL);
+	event_name[irq_index] = kmalloc(count, GFP_KERNEL);
 	sprintf(event_name[irq_index], "%lu", user_val);
 	dev_attr[irq_index]->show = show_irq;
 	dev_attr[irq_index]->store = NULL;
@@ -1438,7 +2824,7 @@
 		return err;
 	}
 
-	return buf_size;
+	return count;
 }
 
 static ssize_t ab8500_unsubscribe_write(struct file *file,
@@ -1446,21 +2832,14 @@
 					size_t count, loff_t *ppos)
 {
 	struct device *dev = ((struct seq_file *)(file->private_data))->private;
-	char buf[32];
-	int buf_size;
 	unsigned long user_val;
 	int err;
 	unsigned int irq_index;
 
-	/* Get userspace string and assure termination */
-	buf_size = min(count, (sizeof(buf)-1));
-	if (copy_from_user(buf, user_buf, buf_size))
-		return -EFAULT;
-	buf[buf_size] = 0;
-
-	err = strict_strtoul(buf, 0, &user_val);
+	err = kstrtoul_from_user(user_buf, count, 0, &user_val);
 	if (err)
-		return -EINVAL;
+		return err;
+
 	if (user_val < irq_first) {
 		dev_err(dev, "debugfs error input < %d\n", irq_first);
 		return -EINVAL;
@@ -1485,7 +2864,7 @@
 	kfree(event_name[irq_index]);
 	kfree(dev_attr[irq_index]);
 
-	return buf_size;
+	return count;
 }
 
 /*
@@ -1583,7 +2962,7 @@
 	irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
 	if (irq_first < 0) {
 		dev_err(&plf->dev, "First irq not found, err %d\n",
-				irq_first);
+			irq_first);
 		ret = irq_first;
 		goto out_freeevent_name;
 	}
@@ -1591,9 +2970,9 @@
 	irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
 	if (irq_last < 0) {
 		dev_err(&plf->dev, "Last irq not found, err %d\n",
-				irq_last);
+			irq_last);
 		ret = irq_last;
-                goto out_freeevent_name;
+		goto out_freeevent_name;
 	}
 
 	ab8500_dir = debugfs_create_dir(AB8500_NAME_STRING, NULL);
@@ -1601,124 +2980,198 @@
 		goto err;
 
 	ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
-	    ab8500_dir);
+		ab8500_dir);
 	if (!ab8500_gpadc_dir)
 		goto err;
 
 	file = debugfs_create_file("all-bank-registers", S_IRUGO,
-	    ab8500_dir, &plf->dev, &ab8500_registers_fops);
+		ab8500_dir, &plf->dev, &ab8500_registers_fops);
 	if (!file)
 		goto err;
 
 	file = debugfs_create_file("all-banks", S_IRUGO,
-	    ab8500_dir, &plf->dev, &ab8500_all_banks_fops);
+		ab8500_dir, &plf->dev, &ab8500_all_banks_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUSR),
-	    ab8500_dir, &plf->dev, &ab8500_bank_fops);
+	file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_dir, &plf->dev, &ab8500_bank_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("register-address", (S_IRUGO | S_IWUSR),
-	    ab8500_dir, &plf->dev, &ab8500_address_fops);
+	file = debugfs_create_file("register-address", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_dir, &plf->dev, &ab8500_address_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("register-value", (S_IRUGO | S_IWUSR),
-	    ab8500_dir, &plf->dev, &ab8500_val_fops);
+	file = debugfs_create_file("register-value", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_dir, &plf->dev, &ab8500_val_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUSR),
-	    ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
+	file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
 	if (!file)
 		goto err;
 
-	if (is_ab8500(ab8500))
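+	/*
+	 * Pick the register ranges and IRQ count for this AB variant
+	 * (the AB9540 reuses the AB8505 register map).
+	 */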
+	if (is_ab8500(ab8500)) {
+		debug_ranges = ab8500_debug_ranges;
 		num_interrupt_lines = AB8500_NR_IRQS;
-	else if (is_ab8505(ab8500))
+	} else if (is_ab8505(ab8500)) {
+		debug_ranges = ab8505_debug_ranges;
 		num_interrupt_lines = AB8505_NR_IRQS;
-	else if (is_ab9540(ab8500))
+	} else if (is_ab9540(ab8500)) {
+		debug_ranges = ab8505_debug_ranges;
 		num_interrupt_lines = AB9540_NR_IRQS;
+	} else if (is_ab8540(ab8500)) {
+		debug_ranges = ab8540_debug_ranges;
+		num_interrupt_lines = AB8540_NR_IRQS;
+	}
 
 	file = debugfs_create_file("interrupts", (S_IRUGO),
-	    ab8500_dir, &plf->dev, &ab8500_interrupts_fops);
+		ab8500_dir, &plf->dev, &ab8500_interrupts_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUSR),
-	    ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+	file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR),
-	    ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
+	file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
+	file = debugfs_create_file("all-modem-registers", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_dir, &plf->dev, &ab8500_modem_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
+	file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
+	file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
+	file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
+	file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
+	file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
+	file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
+	file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
+	file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
+	file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
+	file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+	file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
 	if (!file)
 		goto err;
 
-	file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR),
-	    ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+	file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_id_fops);
+	if (!file)
+		goto err;
+
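+	/* GPADC channels that only exist on the AB8540 */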
+	if (is_ab8540(ab8500)) {
+		file = debugfs_create_file("xtal_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
+			ab8500_gpadc_dir, &plf->dev, &ab8540_gpadc_xtal_temp_fops);
+		if (!file)
+			goto err;
+		file = debugfs_create_file("vbattruemeas", (S_IRUGO | S_IWUSR | S_IWGRP),
+			ab8500_gpadc_dir, &plf->dev,
+			&ab8540_gpadc_vbat_true_meas_fops);
+		if (!file)
+			goto err;
+		file = debugfs_create_file("batctrl_and_ibat",
+			(S_IRUGO | S_IWUSR | S_IWGRP), ab8500_gpadc_dir,
+			&plf->dev, &ab8540_gpadc_bat_ctrl_and_ibat_fops);
+		if (!file)
+			goto err;
+		file = debugfs_create_file("vbatmeas_and_ibat",
+			(S_IRUGO | S_IWUSR | S_IWGRP), ab8500_gpadc_dir,
+			&plf->dev,
+			&ab8540_gpadc_vbat_meas_and_ibat_fops);
+		if (!file)
+			goto err;
+		file = debugfs_create_file("vbattruemeas_and_ibat",
+			(S_IRUGO | S_IWUSR | S_IWGRP), ab8500_gpadc_dir,
+			&plf->dev,
+			&ab8540_gpadc_vbat_true_meas_and_ibat_fops);
+		if (!file)
+			goto err;
+		file = debugfs_create_file("battemp_and_ibat",
+			(S_IRUGO | S_IWUSR | S_IWGRP), ab8500_gpadc_dir,
+			&plf->dev, &ab8540_gpadc_bat_temp_and_ibat_fops);
+		if (!file)
+			goto err;
+		file = debugfs_create_file("otp_calib", (S_IRUGO | S_IWUSR | S_IWGRP),
+			ab8500_gpadc_dir, &plf->dev, &ab8540_gpadc_otp_calib_fops);
+		if (!file)
+			goto err;
+	}
+	file = debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_avg_sample_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_trig_edge_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_trig_timer_fops);
+	if (!file)
+		goto err;
+
+	file = debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_conv_type_fops);
 	if (!file)
 		goto err;
 
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 5f341a5..65f7228 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -37,6 +37,13 @@
 #define AB8500_GPADC_AUTODATAL_REG	0x07
 #define AB8500_GPADC_AUTODATAH_REG	0x08
 #define AB8500_GPADC_MUX_CTRL_REG	0x09
+#define AB8540_GPADC_MANDATA2L_REG	0x09
+#define AB8540_GPADC_MANDATA2H_REG	0x0A
+#define AB8540_GPADC_APEAAX_REG		0x10
+#define AB8540_GPADC_APEAAT_REG		0x11
+#define AB8540_GPADC_APEAAM_REG		0x12
+#define AB8540_GPADC_APEAAH_REG		0x13
+#define AB8540_GPADC_APEAAL_REG		0x14
 
 /*
  * OTP register offsets
@@ -49,19 +56,29 @@
 #define AB8500_GPADC_CAL_5		0x13
 #define AB8500_GPADC_CAL_6		0x14
 #define AB8500_GPADC_CAL_7		0x15
+/* New calibration for 8540 */
+#define AB8540_GPADC_OTP4_REG_7	0x38
+#define AB8540_GPADC_OTP4_REG_6	0x39
+#define AB8540_GPADC_OTP4_REG_5	0x3A
 
 /* gpadc constants */
 #define EN_VINTCORE12			0x04
 #define EN_VTVOUT			0x02
 #define EN_GPADC			0x01
 #define DIS_GPADC			0x00
-#define SW_AVG_16			0x60
+#define AVG_1				0x00
+#define AVG_4				0x20
+#define AVG_8				0x40
+#define AVG_16				0x60
 #define ADC_SW_CONV			0x04
 #define EN_ICHAR			0x80
 #define BTEMP_PULL_UP			0x08
 #define EN_BUF				0x40
 #define DIS_ZERO			0x00
 #define GPADC_BUSY			0x01
+#define EN_FALLING			0x10
+#define EN_TRIG_EDGE			0x02
+#define EN_VBIAS_XTAL_TEMP		0x02
 
 /* GPADC constants from AB8500 spec, UM0836 */
 #define ADC_RESOLUTION			1024
@@ -80,8 +97,21 @@
 #define ADC_CH_BKBAT_MIN		0
 #define ADC_CH_BKBAT_MAX		3200
 
+/* GPADC constants from AB8540 spec */
+#define ADC_CH_IBAT_MIN			(-6000) /* mA range measured by ADC for ibat*/
+#define ADC_CH_IBAT_MAX			6000
+#define ADC_CH_IBAT_MIN_V		(-60)	/* mV range measured by ADC for ibat*/
+#define ADC_CH_IBAT_MAX_V		60
+#define IBAT_VDROP_L			(-56)  /* mV */
+#define IBAT_VDROP_H			56
+
 /* This is used to not lose precision when dividing to get gain and offset */
-#define CALIB_SCALE			1000
+#define CALIB_SCALE		1000
+/*
+ * Number of bits shift used to not lose precision
+ * when dividing to get ibat gain.
+ */
+#define CALIB_SHIFT_IBAT	20
 
 /* Time in ms before disabling regulator */
 #define GPADC_AUDOSUSPEND_DELAY		1
@@ -92,6 +122,7 @@
 	ADC_INPUT_VMAIN = 0,
 	ADC_INPUT_BTEMP,
 	ADC_INPUT_VBAT,
+	ADC_INPUT_IBAT,
 	NBR_CAL_INPUTS,
 };
 
@@ -102,8 +133,10 @@
  * @offset:		Offset of the ADC channel
  */
 struct adc_cal_data {
-	u64 gain;
-	u64 offset;
+	s64 gain;
+	s64 offset;
+	u16 otp_calib_hi;
+	u16 otp_calib_lo;
 };
 
 /**
@@ -116,7 +149,10 @@
  *				the completion of gpadc conversion
  * @ab8500_gpadc_lock:		structure of type mutex
  * @regu:			pointer to the struct regulator
- * @irq:			interrupt number that is used by gpadc
+ * @irq_sw:			interrupt number that is used by gpadc for Sw
+ *				conversion
+ * @irq_hw:			interrupt number that is used by gpadc for Hw
+ *				conversion
  * @cal_data			array of ADC calibration data structs
  */
 struct ab8500_gpadc {
@@ -126,7 +162,8 @@
 	struct completion ab8500_gpadc_complete;
 	struct mutex ab8500_gpadc_lock;
 	struct regulator *regu;
-	int irq;
+	int irq_sw;
+	int irq_hw;
 	struct adc_cal_data cal_data[NBR_CAL_INPUTS];
 };
 
@@ -171,6 +208,7 @@
 			gpadc->cal_data[ADC_INPUT_VMAIN].offset) / CALIB_SCALE;
 		break;
 
+	case XTAL_TEMP:
 	case BAT_CTRL:
 	case BTEMP_BALL:
 	case ACC_DETECT1:
@@ -189,6 +227,7 @@
 		break;
 
 	case MAIN_BAT_V:
+	case VBAT_TRUE_MEAS:
 		/* For some reason we don't have calibrated data */
 		if (!gpadc->cal_data[ADC_INPUT_VBAT].gain) {
 			res = ADC_CH_VBAT_MIN + (ADC_CH_VBAT_MAX -
@@ -232,6 +271,20 @@
 			ADC_RESOLUTION;
 		break;
 
+	case IBAT_VIRTUAL_CHANNEL:
+		/* For some reason we don't have calibrated data */
+		if (!gpadc->cal_data[ADC_INPUT_IBAT].gain) {
+			res = ADC_CH_IBAT_MIN + (ADC_CH_IBAT_MAX -
+				ADC_CH_IBAT_MIN) * ad_value /
+				ADC_RESOLUTION;
+			break;
+		}
+		/* Here we can use the calibrated data */
+		res = (int) (ad_value * gpadc->cal_data[ADC_INPUT_IBAT].gain +
+				gpadc->cal_data[ADC_INPUT_IBAT].offset)
+				>> CALIB_SHIFT_IBAT;
+		break;
+
 	default:
 		dev_err(gpadc->dev,
 			"unknown channel, not possible to convert\n");
@@ -244,25 +297,35 @@
 EXPORT_SYMBOL(ab8500_gpadc_ad_to_voltage);
 
 /**
- * ab8500_gpadc_convert() - gpadc conversion
+ * ab8500_gpadc_sw_hw_convert() - gpadc conversion
  * @channel:	analog channel to be converted to digital data
+ * @avg_sample:  number of ADC samples to average
+ * @trig_edge:  selected ADC trig edge
+ * @trig_timer: selected ADC trigger delay timer
+ * @conv_type: selected conversion type (HW or SW conversion)
  *
  * This function converts the selected analog i/p to digital
  * data.
  */
-int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
+int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
+		u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
 {
 	int ad_value;
 	int voltage;
 
-	ad_value = ab8500_gpadc_read_raw(gpadc, channel);
-	if (ad_value < 0) {
-		dev_err(gpadc->dev, "GPADC raw value failed ch: %d\n", channel);
+	ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
+			trig_edge, trig_timer, conv_type);
+	/* On failure retry a second time */
+	if (ad_value < 0)
+		ad_value = ab8500_gpadc_read_raw(gpadc, channel, avg_sample,
+			trig_edge, trig_timer, conv_type);
+	if (ad_value < 0) {
+		dev_err(gpadc->dev, "GPADC raw value failed ch: %d\n",
+				channel);
 		return ad_value;
 	}
 
 	voltage = ab8500_gpadc_ad_to_voltage(gpadc, channel, ad_value);
-
 	if (voltage < 0)
 		dev_err(gpadc->dev, "GPADC to voltage conversion failed ch:"
 			" %d AD: 0x%x\n", channel, ad_value);
@@ -274,21 +337,46 @@
 /**
  * ab8500_gpadc_read_raw() - gpadc read
  * @channel:	analog channel to be read
+ * @avg_sample:  number of ADC samples to average
+ * @trig_edge:  selected trig edge
+ * @trig_timer: selected ADC trigger delay timer
+ * @conv_type: selected conversion type (HW or SW conversion)
  *
- * This function obtains the raw ADC value, this then needs
- * to be converted by calling ab8500_gpadc_ad_to_voltage()
+ * This function obtains the raw ADC value for a hardware or software
+ * conversion; the result then needs to be converted by calling
+ * ab8500_gpadc_ad_to_voltage()
  */
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel)
+int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
+		u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type)
+{
+	int raw_data;
+	raw_data = ab8500_gpadc_double_read_raw(gpadc, channel,
+			avg_sample, trig_edge, trig_timer, conv_type, NULL);
+	return raw_data;
+}
+
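+/*
+ * Like ab8500_gpadc_read_raw(), but for the AB8540 combined channels the
+ * second (IBAT) sample is also returned through @ibat.
+ */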
+int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
+		u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
+		int *ibat)
 {
 	int ret;
 	int looplimit = 0;
-	u8 val, low_data, high_data;
+	unsigned long completion_timeout;
+	u8 val, low_data, high_data, low_data2, high_data2;
+	u8 val_reg1 = 0;
+	unsigned int delay_min = 0;
+	unsigned int delay_max = 0;
+	u8 data_low_addr, data_high_addr;
 
 	if (!gpadc)
 		return -ENODEV;
 
-	mutex_lock(&gpadc->ab8500_gpadc_lock);
+	/* check if the conversion type is supported */
+	if ((gpadc->irq_sw < 0) && (conv_type == ADC_SW))
+		return -ENOTSUPP;
+	if ((gpadc->irq_hw < 0) && (conv_type == ADC_HW))
+		return -ENOTSUPP;
 
+	mutex_lock(&gpadc->ab8500_gpadc_lock);
 	/* Enable VTVout LDO this is required for GPADC */
 	pm_runtime_get_sync(gpadc->dev);
 
@@ -309,16 +397,34 @@
 	}
 
 	/* Enable GPADC */
-	ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
-		AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_GPADC, EN_GPADC);
-	if (ret < 0) {
-		dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n");
-		goto out;
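+	/* CTRL1 settings are accumulated in val_reg1 and written below */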
+	val_reg1 |= EN_GPADC;
+
+	/* Select the channel source and set average samples */
+	switch (avg_sample) {
+	case SAMPLE_1:
+		val = channel | AVG_1;
+		break;
+	case SAMPLE_4:
+		val = channel | AVG_4;
+		break;
+	case SAMPLE_8:
+		val = channel | AVG_8;
+		break;
+	default:
+		val = channel | AVG_16;
+		break;
 	}
 
-	/* Select the channel source and set average samples to 16 */
-	ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
-		AB8500_GPADC_CTRL2_REG, (channel | SW_AVG_16));
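+	/* HW-triggered conversions are configured via CTRL3, SW via CTRL2 */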
+	if (conv_type == ADC_HW) {
+		ret = abx500_set_register_interruptible(gpadc->dev,
+				AB8500_GPADC, AB8500_GPADC_CTRL3_REG, val);
+		val_reg1 |= EN_TRIG_EDGE;
+		if (trig_edge)
+			val_reg1 |= EN_FALLING;
+	} else {
+		ret = abx500_set_register_interruptible(gpadc->dev,
+				AB8500_GPADC, AB8500_GPADC_CTRL2_REG, val);
+	}
 	if (ret < 0) {
 		dev_err(gpadc->dev,
 			"gpadc_conversion: set avg samples failed\n");
@@ -333,71 +439,129 @@
 	switch (channel) {
 	case MAIN_CHARGER_C:
 	case USB_CHARGER_C:
-		ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
-			AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
-			EN_BUF | EN_ICHAR,
-			EN_BUF | EN_ICHAR);
+		val_reg1 |= EN_BUF | EN_ICHAR;
 		break;
 	case BTEMP_BALL:
 		if (!is_ab8500_2p0_or_earlier(gpadc->parent)) {
-			/* Turn on btemp pull-up on ABB 3.0 */
-			ret = abx500_mask_and_set_register_interruptible(
-				gpadc->dev,
-				AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
-				EN_BUF | BTEMP_PULL_UP,
-				EN_BUF | BTEMP_PULL_UP);
-
-		 /*
-		  * Delay might be needed for ABB8500 cut 3.0, if not, remove
-		  * when hardware will be available
-		  */
-			usleep_range(1000, 1000);
+			val_reg1 |= EN_BUF | BTEMP_PULL_UP;
+			/*
+			 * Delay might be needed for ABB8500 cut 3.0, if not,
+			 * remove once the hardware is available.
+			 */
+			delay_min = 1000; /* Delay in microseconds */
+			delay_max = 10000; /* Large range to optimise sleep mode */
 			break;
 		}
 		/* Intentional fallthrough */
 	default:
-		ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
-			AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF);
+		val_reg1 |= EN_BUF;
 		break;
 	}
+
+	/* Write configuration to register */
+	ret = abx500_set_register_interruptible(gpadc->dev,
+		AB8500_GPADC, AB8500_GPADC_CTRL1_REG, val_reg1);
 	if (ret < 0) {
 		dev_err(gpadc->dev,
-			"gpadc_conversion: select falling edge failed\n");
+			"gpadc_conversion: set Control register failed\n");
 		goto out;
 	}
 
-	ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
-		AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV);
-	if (ret < 0) {
-		dev_err(gpadc->dev,
-			"gpadc_conversion: start s/w conversion failed\n");
-		goto out;
+	if (delay_min != 0)
+		usleep_range(delay_min, delay_max);
+
+	if (conv_type == ADC_HW) {
+		/* Set trigger delay timer */
+		ret = abx500_set_register_interruptible(gpadc->dev,
+			AB8500_GPADC, AB8500_GPADC_AUTO_TIMER_REG, trig_timer);
+		if (ret < 0) {
+			dev_err(gpadc->dev,
+				"gpadc_conversion: trig timer failed\n");
+			goto out;
+		}
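+		/*
+		 * HW results land in the AUTODATA registers; allow a
+		 * longer wait for the external trigger.
+		 */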
+		completion_timeout = 2 * HZ;
+		data_low_addr = AB8500_GPADC_AUTODATAL_REG;
+		data_high_addr = AB8500_GPADC_AUTODATAH_REG;
+	} else {
+		/* Start SW conversion */
+		ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
+			AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
+			ADC_SW_CONV, ADC_SW_CONV);
+		if (ret < 0) {
+			dev_err(gpadc->dev,
+				"gpadc_conversion: start s/w conv failed\n");
+			goto out;
+		}
+		completion_timeout = msecs_to_jiffies(CONVERSION_TIME);
+		data_low_addr = AB8500_GPADC_MANDATAL_REG;
+		data_high_addr = AB8500_GPADC_MANDATAH_REG;
 	}
+
 	/* wait for completion of conversion */
 	if (!wait_for_completion_timeout(&gpadc->ab8500_gpadc_complete,
-					 msecs_to_jiffies(CONVERSION_TIME))) {
+			completion_timeout)) {
 		dev_err(gpadc->dev,
-			"timeout: didn't receive GPADC conversion interrupt\n");
+			"timeout: didn't receive GPADC conv interrupt\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	/* Read the converted RAW data */
-	ret = abx500_get_register_interruptible(gpadc->dev, AB8500_GPADC,
-		AB8500_GPADC_MANDATAL_REG, &low_data);
+	ret = abx500_get_register_interruptible(gpadc->dev,
+			AB8500_GPADC, data_low_addr, &low_data);
 	if (ret < 0) {
 		dev_err(gpadc->dev, "gpadc_conversion: read low data failed\n");
 		goto out;
 	}
 
-	ret = abx500_get_register_interruptible(gpadc->dev, AB8500_GPADC,
-		AB8500_GPADC_MANDATAH_REG, &high_data);
+	ret = abx500_get_register_interruptible(gpadc->dev,
+		AB8500_GPADC, data_high_addr, &high_data);
 	if (ret < 0) {
-		dev_err(gpadc->dev,
-			"gpadc_conversion: read high data failed\n");
+		dev_err(gpadc->dev, "gpadc_conversion: read high data failed\n");
 		goto out;
 	}
 
+	/* Check if a double conversion is required */
+	if ((channel == BAT_CTRL_AND_IBAT) ||
+			(channel == VBAT_MEAS_AND_IBAT) ||
+			(channel == VBAT_TRUE_MEAS_AND_IBAT) ||
+			(channel == BAT_TEMP_AND_IBAT)) {
+
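+		/*
+		 * The combined channels deliver a second data word
+		 * holding the IBAT sample.
+		 */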
+		if (conv_type == ADC_HW) {
+			/* not supported */
+			ret = -ENOTSUPP;
+			dev_err(gpadc->dev,
+				"gpadc_conversion: only SW double conversion supported\n");
+			goto out;
+		} else {
+			/* Read the converted RAW data 2 */
+			ret = abx500_get_register_interruptible(gpadc->dev,
+				AB8500_GPADC, AB8540_GPADC_MANDATA2L_REG,
+				&low_data2);
+			if (ret < 0) {
+				dev_err(gpadc->dev,
+					"gpadc_conversion: read sw low data 2 failed\n");
+				goto out;
+			}
+
+			ret = abx500_get_register_interruptible(gpadc->dev,
+				AB8500_GPADC, AB8540_GPADC_MANDATA2H_REG,
+				&high_data2);
+			if (ret < 0) {
+				dev_err(gpadc->dev,
+					"gpadc_conversion: read sw high data 2 failed\n");
+				goto out;
+			}
+			if (ibat != NULL) {
+				*ibat = (high_data2 << 8) | low_data2;
+			} else {
+				dev_warn(gpadc->dev,
+					"gpadc_conversion: ibat not stored\n");
+			}
+
+		}
+	}
+
 	/* Disable GPADC */
 	ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
 		AB8500_GPADC_CTRL1_REG, DIS_GPADC);
@@ -406,6 +570,7 @@
 		goto out;
 	}
 
+	/* Release runtime PM; the VTVout LDO is no longer needed by the GPADC */
 	pm_runtime_mark_last_busy(gpadc->dev);
 	pm_runtime_put_autosuspend(gpadc->dev);
 
@@ -422,9 +587,7 @@
 	 */
 	(void) abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
 		AB8500_GPADC_CTRL1_REG, DIS_GPADC);
-
 	pm_runtime_put(gpadc->dev);
-
 	mutex_unlock(&gpadc->ab8500_gpadc_lock);
 	dev_err(gpadc->dev,
 		"gpadc_conversion: Failed to AD convert channel %d\n", channel);
@@ -433,16 +596,16 @@
 EXPORT_SYMBOL(ab8500_gpadc_read_raw);
 
 /**
- * ab8500_bm_gpswadcconvend_handler() - isr for s/w gpadc conversion completion
+ * ab8500_bm_gpadcconvend_handler() - isr for gpadc conversion completion
  * @irq:	irq number
  * @data:	pointer to the data passed during request irq
  *
- * This is a interrupt service routine for s/w gpadc conversion completion.
+ * This is an interrupt service routine for gpadc conversion completion.
  * Notifies the gpadc completion is completed and the converted raw value
  * can be read from the registers.
  * Returns IRQ status(IRQ_HANDLED)
  */
-static irqreturn_t ab8500_bm_gpswadcconvend_handler(int irq, void *_gpadc)
+static irqreturn_t ab8500_bm_gpadcconvend_handler(int irq, void *_gpadc)
 {
 	struct ab8500_gpadc *gpadc = _gpadc;
 
@@ -461,15 +624,27 @@
 	AB8500_GPADC_CAL_7,
 };
 
+static int otp4_cal_regs[] = {
+	AB8540_GPADC_OTP4_REG_7,
+	AB8540_GPADC_OTP4_REG_6,
+	AB8540_GPADC_OTP4_REG_5,
+};
+
 static void ab8500_gpadc_read_calibration_data(struct ab8500_gpadc *gpadc)
 {
 	int i;
 	int ret[ARRAY_SIZE(otp_cal_regs)];
 	u8 gpadc_cal[ARRAY_SIZE(otp_cal_regs)];
-
+	int ret_otp4[ARRAY_SIZE(otp4_cal_regs)];
+	u8 gpadc_otp4[ARRAY_SIZE(otp4_cal_regs)];
 	int vmain_high, vmain_low;
 	int btemp_high, btemp_low;
 	int vbat_high, vbat_low;
+	int ibat_high, ibat_low;
+	s64 V_gain, V_offset, V2A_gain, V2A_offset;
+	struct ab8500 *ab8500;
+
+	ab8500 = gpadc->parent;
 
 	/* First we read all OTP registers and store the error code */
 	for (i = 0; i < ARRAY_SIZE(otp_cal_regs); i++) {
@@ -489,7 +664,7 @@
 	 * bt_h/l = btemp_high/low
 	 * vb_h/l = vbat_high/low
 	 *
-	 * Data bits:
+	 * Data bits 8500/9540:
 	 * | 7	   | 6	   | 5	   | 4	   | 3	   | 2	   | 1	   | 0
 	 * |.......|.......|.......|.......|.......|.......|.......|.......
 	 * |						   | vm_h9 | vm_h8
@@ -507,6 +682,35 @@
 	 * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
 	 * |.......|.......|.......|.......|.......|.......|.......|.......
 	 *
+	 * Data bits 8540:
+	 * OTP2
+	 * | 7	   | 6	   | 5	   | 4	   | 3	   | 2	   | 1	   | 0
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * |
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | vm_h9 | vm_h8 | vm_h7 | vm_h6 | vm_h5 | vm_h4 | vm_h3 | vm_h2
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | vm_h1 | vm_h0 | vm_l4 | vm_l3 | vm_l2 | vm_l1 | vm_l0 | bt_h9
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | bt_h8 | bt_h7 | bt_h6 | bt_h5 | bt_h4 | bt_h3 | bt_h2 | bt_h1
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | bt_h0 | bt_l4 | bt_l3 | bt_l2 | bt_l1 | bt_l0 | vb_h9 | vb_h8
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | vb_h7 | vb_h6 | vb_h5 | vb_h4 | vb_h3 | vb_h2 | vb_h1 | vb_h0
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | vb_l5 | vb_l4 | vb_l3 | vb_l2 | vb_l1 | vb_l0 |
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 *
+	 * Data bits 8540:
+	 * OTP4
+	 * | 7	   | 6	   | 5	   | 4	   | 3	   | 2	   | 1	   | 0
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * |					   | ib_h9 | ib_h8 | ib_h7
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | ib_h6 | ib_h5 | ib_h4 | ib_h3 | ib_h2 | ib_h1 | ib_h0 | ib_l5
+	 * |.......|.......|.......|.......|.......|.......|.......|.......
+	 * | ib_l4 | ib_l3 | ib_l2 | ib_l1 | ib_l0 |
+	 *
 	 *
 	 * Ideal output ADC codes corresponding to injected input voltages
 	 * during manufacturing is:
@@ -519,38 +723,116 @@
 	 * vbat_low:   Vin = 2380mV  / ADC ideal code = 33
 	 */
 
-	/* Calculate gain and offset for VMAIN if all reads succeeded */
-	if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
-		vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
-			((gpadc_cal[1] & 0x3F) << 2) |
-			((gpadc_cal[2] & 0xC0) >> 6));
+	if (is_ab8540(ab8500)) {
+		/* Calculate gain and offset for VMAIN if all reads succeeded */
+		if (!(ret[1] < 0 || ret[2] < 0)) {
+			vmain_high = (((gpadc_cal[1] & 0xFF) << 2) |
+				((gpadc_cal[2] & 0xC0) >> 6));
+			vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
 
-		vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+			gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
+				(u16)vmain_high;
+			gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
+				(u16)vmain_low;
 
-		gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
-			(19500 - 315) /	(vmain_high - vmain_low);
-
-		gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE * 19500 -
-			(CALIB_SCALE * (19500 - 315) /
-			 (vmain_high - vmain_low)) * vmain_high;
-	} else {
+			gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
+				(19500 - 315) / (vmain_high - vmain_low);
+			gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
+				19500 - (CALIB_SCALE * (19500 - 315) /
+				(vmain_high - vmain_low)) * vmain_high;
+		} else {
-		gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
+			gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
+		}
+
+		/* Read IBAT calibration Data */
+		for (i = 0; i < ARRAY_SIZE(otp4_cal_regs); i++) {
+			ret_otp4[i] = abx500_get_register_interruptible(
+					gpadc->dev, AB8500_OTP_EMUL,
+					otp4_cal_regs[i],  &gpadc_otp4[i]);
+			if (ret_otp4[i] < 0)
+				dev_err(gpadc->dev,
+					"%s: read otp4 reg 0x%02x failed\n",
+					__func__, otp4_cal_regs[i]);
+		}
+
+		/* Calculate gain and offset for IBAT if all reads succeeded */
+		if (!(ret_otp4[0] < 0 || ret_otp4[1] < 0 || ret_otp4[2] < 0)) {
+			ibat_high = (((gpadc_otp4[0] & 0x07) << 7) |
+				((gpadc_otp4[1] & 0xFE) >> 1));
+			ibat_low = (((gpadc_otp4[1] & 0x01) << 5) |
+				((gpadc_otp4[2] & 0xF8) >> 3));
+
+			gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi =
+				(u16)ibat_high;
+			gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo =
+				(u16)ibat_low;
+
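+			/*
+			 * Gain and offset between ADC code and voltage,
+			 * derived from the two OTP calibration points and
+			 * kept in fixed point (CALIB_SHIFT_IBAT).
+			 */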
+			V_gain = ((IBAT_VDROP_H - IBAT_VDROP_L)
+				<< CALIB_SHIFT_IBAT) / (ibat_high - ibat_low);
+
+			V_offset = (IBAT_VDROP_H << CALIB_SHIFT_IBAT) -
+				(((IBAT_VDROP_H - IBAT_VDROP_L) <<
+				CALIB_SHIFT_IBAT) / (ibat_high - ibat_low))
+				* ibat_high;
+			/*
+			 * Result obtained is in mV (at a scale factor),
+			 * we need to calculate gain and offset to get mA
+			 */
+			V2A_gain = (ADC_CH_IBAT_MAX - ADC_CH_IBAT_MIN) /
+				(ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
+			V2A_offset = ((ADC_CH_IBAT_MAX_V * ADC_CH_IBAT_MIN -
+				ADC_CH_IBAT_MAX * ADC_CH_IBAT_MIN_V)
+				<< CALIB_SHIFT_IBAT)
+				/ (ADC_CH_IBAT_MAX_V - ADC_CH_IBAT_MIN_V);
+
+			gpadc->cal_data[ADC_INPUT_IBAT].gain = V_gain * V2A_gain;
+			gpadc->cal_data[ADC_INPUT_IBAT].offset = V_offset *
+				V2A_gain + V2A_offset;
+		} else {
+			gpadc->cal_data[ADC_INPUT_IBAT].gain = 0;
+		}
+
+		dev_dbg(gpadc->dev, "IBAT gain %lld offset %lld\n",
+			gpadc->cal_data[ADC_INPUT_IBAT].gain,
+			gpadc->cal_data[ADC_INPUT_IBAT].offset);
+	} else {
+		/* Calculate gain and offset for VMAIN if all reads succeeded */
+		if (!(ret[0] < 0 || ret[1] < 0 || ret[2] < 0)) {
+			vmain_high = (((gpadc_cal[0] & 0x03) << 8) |
+				((gpadc_cal[1] & 0x3F) << 2) |
+				((gpadc_cal[2] & 0xC0) >> 6));
+			vmain_low = ((gpadc_cal[2] & 0x3E) >> 1);
+
+			gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi =
+				(u16)vmain_high;
+			gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo =
+				(u16)vmain_low;
+
+			gpadc->cal_data[ADC_INPUT_VMAIN].gain = CALIB_SCALE *
+				(19500 - 315) / (vmain_high - vmain_low);
+
+			gpadc->cal_data[ADC_INPUT_VMAIN].offset = CALIB_SCALE *
+				19500 - (CALIB_SCALE * (19500 - 315) /
+				(vmain_high - vmain_low)) * vmain_high;
+		} else {
+			gpadc->cal_data[ADC_INPUT_VMAIN].gain = 0;
+		}
 	}
 
 	/* Calculate gain and offset for BTEMP if all reads succeeded */
 	if (!(ret[2] < 0 || ret[3] < 0 || ret[4] < 0)) {
 		btemp_high = (((gpadc_cal[2] & 0x01) << 9) |
-			(gpadc_cal[3] << 1) |
-			((gpadc_cal[4] & 0x80) >> 7));
-
+			(gpadc_cal[3] << 1) | ((gpadc_cal[4] & 0x80) >> 7));
 		btemp_low = ((gpadc_cal[4] & 0x7C) >> 2);
 
+		gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi = (u16)btemp_high;
+		gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo = (u16)btemp_low;
+
 		gpadc->cal_data[ADC_INPUT_BTEMP].gain =
 			CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low);
-
 		gpadc->cal_data[ADC_INPUT_BTEMP].offset = CALIB_SCALE * 1300 -
-			(CALIB_SCALE * (1300 - 21) /
-			(btemp_high - btemp_low)) * btemp_high;
+			(CALIB_SCALE * (1300 - 21) / (btemp_high - btemp_low))
+			* btemp_high;
 	} else {
 		gpadc->cal_data[ADC_INPUT_BTEMP].gain = 0;
 	}
@@ -560,9 +842,11 @@
 		vbat_high = (((gpadc_cal[4] & 0x03) << 8) | gpadc_cal[5]);
 		vbat_low = ((gpadc_cal[6] & 0xFC) >> 2);
 
+		gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi = (u16)vbat_high;
+		gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo = (u16)vbat_low;
+
 		gpadc->cal_data[ADC_INPUT_VBAT].gain = CALIB_SCALE *
 			(4700 - 2380) /	(vbat_high - vbat_low);
-
 		gpadc->cal_data[ADC_INPUT_VBAT].offset = CALIB_SCALE * 4700 -
 			(CALIB_SCALE * (4700 - 2380) /
 			(vbat_high - vbat_low)) * vbat_high;
@@ -608,6 +892,31 @@
 	return 0;
 }
 
+static int ab8500_gpadc_suspend(struct device *dev)
+{
+	struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
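+	/* Hold the lock across suspend so no conversion runs until resume */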
+	mutex_lock(&gpadc->ab8500_gpadc_lock);
+
+	pm_runtime_get_sync(dev);
+
+	regulator_disable(gpadc->regu);
+	return 0;
+}
+
+static int ab8500_gpadc_resume(struct device *dev)
+{
+	struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
+
+	regulator_enable(gpadc->regu);
+
+	pm_runtime_mark_last_busy(gpadc->dev);
+	pm_runtime_put_autosuspend(gpadc->dev);
+
+	mutex_unlock(&gpadc->ab8500_gpadc_lock);
+	return 0;
+}
+
 static int ab8500_gpadc_probe(struct platform_device *pdev)
 {
 	int ret = 0;
@@ -619,13 +928,13 @@
 		return -ENOMEM;
 	}
 
-	gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
-	if (gpadc->irq < 0) {
-		dev_err(&pdev->dev, "failed to get platform irq-%d\n",
-			gpadc->irq);
-		ret = gpadc->irq;
-		goto fail;
-	}
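+	/*
+	 * Both IRQs are optional; a missing one just disables the
+	 * corresponding conversion type.
+	 */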
+	gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
+	if (gpadc->irq_sw < 0)
+		dev_err(&pdev->dev, "failed to get platform sw_conv_end irq\n");
+
+	gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
+	if (gpadc->irq_hw < 0)
+		dev_err(&pdev->dev, "failed to get platform hw_conv_end irq\n");
 
 	gpadc->dev = &pdev->dev;
 	gpadc->parent = dev_get_drvdata(pdev->dev.parent);
@@ -634,15 +943,31 @@
 	/* Initialize completion used to notify completion of conversion */
 	init_completion(&gpadc->ab8500_gpadc_complete);
 
-	/* Register interrupt  - SwAdcComplete */
-	ret = request_threaded_irq(gpadc->irq, NULL,
-		ab8500_bm_gpswadcconvend_handler,
-		IRQF_ONESHOT | IRQF_NO_SUSPEND | IRQF_SHARED,
-				"ab8500-gpadc", gpadc);
-	if (ret < 0) {
-		dev_err(gpadc->dev, "Failed to register interrupt, irq: %d\n",
-			gpadc->irq);
-		goto fail;
+	/* Register interrupts */
+	if (gpadc->irq_sw >= 0) {
+		ret = request_threaded_irq(gpadc->irq_sw, NULL,
+			ab8500_bm_gpadcconvend_handler,
+			IRQF_NO_SUSPEND | IRQF_SHARED, "ab8500-gpadc-sw",
+			gpadc);
+		if (ret < 0) {
+			dev_err(gpadc->dev,
+				"Failed to register interrupt irq: %d\n",
+				gpadc->irq_sw);
+			goto fail;
+		}
+	}
+
+	if (gpadc->irq_hw >= 0) {
+		ret = request_threaded_irq(gpadc->irq_hw, NULL,
+			ab8500_bm_gpadcconvend_handler,
+			IRQF_NO_SUSPEND | IRQF_SHARED, "ab8500-gpadc-hw",
+			gpadc);
+		if (ret < 0) {
+			dev_err(gpadc->dev,
+				"Failed to register interrupt irq: %d\n",
+				gpadc->irq_hw);
+			goto fail_irq;
+		}
 	}
 
 	/* VTVout LDO used to power up ab8500-GPADC */
@@ -669,11 +994,13 @@
 	ab8500_gpadc_read_calibration_data(gpadc);
 	list_add_tail(&gpadc->node, &ab8500_gpadc_list);
 	dev_dbg(gpadc->dev, "probe success\n");
+
 	return 0;
 
 fail_enable:
+	if (gpadc->irq_hw >= 0)
+		free_irq(gpadc->irq_hw, gpadc);
 fail_irq:
-	free_irq(gpadc->irq, gpadc);
+	if (gpadc->irq_sw >= 0)
+		free_irq(gpadc->irq_sw, gpadc);
 fail:
 	kfree(gpadc);
 	gpadc = NULL;
@@ -687,7 +1014,10 @@
 	/* remove this gpadc entry from the list */
 	list_del(&gpadc->node);
 	/* remove interrupt  - completion of Sw ADC conversion */
-	free_irq(gpadc->irq, gpadc);
+	if (gpadc->irq_sw >= 0)
+		free_irq(gpadc->irq_sw, gpadc);
+	if (gpadc->irq_hw >= 0)
+		free_irq(gpadc->irq_hw, gpadc);
 
 	pm_runtime_get_sync(gpadc->dev);
 	pm_runtime_disable(gpadc->dev);
@@ -707,6 +1037,9 @@
 	SET_RUNTIME_PM_OPS(ab8500_gpadc_runtime_suspend,
 			   ab8500_gpadc_runtime_resume,
 			   ab8500_gpadc_runtime_idle)
+	SET_SYSTEM_SLEEP_PM_OPS(ab8500_gpadc_suspend,
+				ab8500_gpadc_resume)
+
 };
 
 static struct platform_driver ab8500_gpadc_driver = {
@@ -729,10 +1062,30 @@
 	platform_driver_unregister(&ab8500_gpadc_driver);
 }
 
+/**
+ * ab8540_gpadc_get_otp() - return the raw OTP calibration values
+ *
+ * Copies the stored high/low OTP calibration words for the VMAIN, BTEMP,
+ * VBAT and IBAT inputs to the caller-provided locations.
+ */
+void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
+			u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
+			u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h)
+{
+	*vmain_l = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_lo;
+	*vmain_h = gpadc->cal_data[ADC_INPUT_VMAIN].otp_calib_hi;
+	*btemp_l = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_lo;
+	*btemp_h = gpadc->cal_data[ADC_INPUT_BTEMP].otp_calib_hi;
+	*vbat_l = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_lo;
+	*vbat_h = gpadc->cal_data[ADC_INPUT_VBAT].otp_calib_hi;
+	*ibat_l = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_lo;
+	*ibat_h = gpadc->cal_data[ADC_INPUT_IBAT].otp_calib_hi;
+}
+
 subsys_initcall_sync(ab8500_gpadc_init);
 module_exit(ab8500_gpadc_exit);
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Arun R Murthy, Daniel Willerud, Johan Palsson");
+MODULE_AUTHOR("Arun R Murthy, Daniel Willerud, Johan Palsson, "
+		"M'boumba Cedric Madianga");
 MODULE_ALIAS("platform:ab8500_gpadc");
 MODULE_DESCRIPTION("AB8500 GPADC driver");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index 108fd86..272479c 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -15,19 +15,30 @@
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/mfd/abx500/ab8500-sysctrl.h>
 
+/* RtcCtrl bits */
+#define AB8500_ALARM_MIN_LOW	0x08
+#define AB8500_ALARM_MIN_MID	0x09
+#define RTC_CTRL		0x0B
+#define RTC_ALARM_ENABLE	0x4
+
 static struct device *sysctrl_dev;
 
 void ab8500_power_off(void)
 {
 	sigset_t old;
 	sigset_t all;
-	static char *pss[] = {"ab8500_ac", "ab8500_usb"};
+	static char *pss[] = {"ab8500_ac", "pm2301", "ab8500_usb"};
 	int i;
 	bool charger_present = false;
 	union power_supply_propval val;
 	struct power_supply *psy;
 	int ret;
 
+	if (sysctrl_dev == NULL) {
+		pr_err("%s: sysctrl not initialized\n", __func__);
+		return;
+	}
+
 	/*
 	 * If we have a charger connected and we're powering off,
 	 * reboot into charge-only mode.
@@ -74,6 +85,63 @@
 	}
 }
 
+/*
+ * Use the AB WD to reset the platform. It will perform a hard
+ * reset instead of a soft reset. Write the reset reason to
+ * the AB before reset, which can be read upon restart.
+ */
+void ab8500_restart(char mode, const char *cmd)
+{
+	struct ab8500_platform_data *plat;
+	struct ab8500_sysctrl_platform_data *pdata;
+	u16 reason = 0;
+	u8 val;
+
+	if (sysctrl_dev == NULL) {
+		pr_err("%s: sysctrl not initialized\n", __func__);
+		return;
+	}
+
+	plat = dev_get_platdata(sysctrl_dev->parent);
+	pdata = plat->sysctrl;
+	if (pdata->reboot_reason_code)
+		reason = pdata->reboot_reason_code(cmd);
+	else
+		pr_warn("[%s] No reboot reason set. Default reason %d\n",
+			__func__, reason);
+
+	/*
+	 * Disable RTC alarm, just a precaution so that no alarm
+	 * is running when WD reset is executed.
+	 */
+	abx500_get_register_interruptible(sysctrl_dev, AB8500_RTC,
+		RTC_CTRL, &val);
+	abx500_set_register_interruptible(sysctrl_dev, AB8500_RTC,
+		RTC_CTRL, (val & ~RTC_ALARM_ENABLE));
+
+	/*
+	 * Android is not using the RTC alarm registers during reboot
+	 * so we borrow them for writing the reason of reset
+	 */
+
+	/* reason[8 LSB] */
+	val = reason & 0xFF;
+	abx500_set_register_interruptible(sysctrl_dev, AB8500_RTC,
+		AB8500_ALARM_MIN_LOW, val);
+
+	/* reason[8 MSB] */
+	val = (reason >> 8) & 0xFF;
+	abx500_set_register_interruptible(sysctrl_dev, AB8500_RTC,
+		AB8500_ALARM_MIN_MID, val);
+
+	/* Setting WD timeout to 0 */
+	ab8500_sysctrl_write(AB8500_MAINWDOGTIMER, 0xFF, 0x0);
+
+	/* Setting the parameters to AB8500 WD */
+	ab8500_sysctrl_write(AB8500_MAINWDOGCTRL, 0xFF, (AB8500_ENABLE_WD |
+		AB8500_WD_RESTART_ON_EXPIRE | AB8500_KICK_WD));
+}
+
 static inline bool valid_bank(u8 bank)
 {
 	return ((bank == AB8500_SYS_CTRL1_BLOCK) ||
@@ -85,7 +153,7 @@
 	u8 bank;
 
 	if (sysctrl_dev == NULL)
-		return -EAGAIN;
+		return -EINVAL;
 
 	bank = (reg >> 8);
 	if (!valid_bank(bank))
@@ -101,7 +169,7 @@
 	u8 bank;
 
 	if (sysctrl_dev == NULL)
-		return -EAGAIN;
+		return -EINVAL;
 
 	bank = (reg >> 8);
 	if (!valid_bank(bank))
@@ -114,28 +182,36 @@
 
 static int ab8500_sysctrl_probe(struct platform_device *pdev)
 {
+	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
 	struct ab8500_platform_data *plat;
 	struct ab8500_sysctrl_platform_data *pdata;
 
-	sysctrl_dev = &pdev->dev;
 	plat = dev_get_platdata(pdev->dev.parent);
+
+	if (!(plat && plat->sysctrl))
+		return -EINVAL;
+
 	if (plat->pm_power_off)
 		pm_power_off = ab8500_power_off;
 
 	pdata = plat->sysctrl;
 
 	if (pdata) {
-		int ret, i, j;
+		int last, ret, i, j;
 
-		for (i = AB8500_SYSCLKREQ1RFCLKBUF;
-		     i <= AB8500_SYSCLKREQ8RFCLKBUF; i++) {
+		if (is_ab8505(ab8500))
+			last = AB8500_SYSCLKREQ4RFCLKBUF;
+		else
+			last = AB8500_SYSCLKREQ8RFCLKBUF;
+
+		for (i = AB8500_SYSCLKREQ1RFCLKBUF; i <= last; i++) {
 			j = i - AB8500_SYSCLKREQ1RFCLKBUF;
 			ret = ab8500_sysctrl_write(i, 0xff,
-						   pdata->initial_req_buf_config[j]);
+					pdata->initial_req_buf_config[j]);
 			dev_dbg(&pdev->dev,
-				"Setting SysClkReq%dRfClkBuf 0x%X\n",
-				j + 1,
-				pdata->initial_req_buf_config[j]);
+					"Setting SysClkReq%dRfClkBuf 0x%X\n",
+					j + 1,
+					pdata->initial_req_buf_config[j]);
 			if (ret < 0) {
 				dev_err(&pdev->dev,
 					"unable to set sysClkReq%dRfClkBuf: "
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 14d4dce..d544e3a 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4121,7 +4121,7 @@
 		resource->res3.data.irq.sharable = ACPI_SHARED;
 
 		resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
-
+		resource->res4.length = sizeof(struct acpi_resource);
 	}
 	/* setup Type 2/3 resources */
 	else {
@@ -4140,6 +4140,7 @@
 		resource->res2.data.irq.sharable = ACPI_SHARED;
 
 		resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
+		resource->res3.length = sizeof(struct acpi_resource);
 	}
 
 	/* Attempt to set the resource */
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 918d5f0..cf88f9b6 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -379,10 +379,6 @@
 		*type = (tag >> 3) & 0x0f;
 		*size = tag & 0x07;
 	}
-#if 0
-	printk(KERN_DEBUG "tag = 0x%x, type = 0x%x, size = %i\n", tag, *type,
-	       *size);
-#endif
 	if (*type == 0xff && *size == 0xffff)	/* probably invalid data */
 		return -1;
 	return 0;
@@ -813,13 +809,6 @@
 		if (!card)
 			continue;
 
-#if 0
-		dev_info(&card->dev,
-		       "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
-		       header[0], header[1], header[2], header[3], header[4],
-		       header[5], header[6], header[7], header[8]);
-		dev_info(&card->dev, "checksum = %#x\n", checksum);
-#endif
 		INIT_LIST_HEAD(&card->devices);
 		card->serial =
 		    (header[7] << 24) | (header[6] << 16) | (header[5] << 8) |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index b8f4ea7..9847ab1 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -634,6 +634,7 @@
 	}
 	/* resource will pointer the end resource now */
 	resource->type = ACPI_RESOURCE_TYPE_END_TAG;
+	resource->length = sizeof(struct acpi_resource);
 
 	return 0;
 }
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 63ddb01..1c03ee8 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -185,10 +185,9 @@
 
 		if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node))
 			break;
-		seq_printf(m, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
+		seq_printf(m, "%02x\t%08x\t%3phC\t%04x\n",
 			     node->handle, node->eisa_id,
-			     node->type_code[0], node->type_code[1],
-			     node->type_code[2], node->flags);
+			     node->type_code, node->flags);
 		if (nodenum <= thisnodenum) {
 			printk(KERN_ERR
 			       "%s Node number 0x%x is out of sequence following node 0x%x. Aborting.\n",
diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
index 4b37a5a..36fb4b5 100644
--- a/drivers/power/88pm860x_charger.c
+++ b/drivers/power/88pm860x_charger.c
@@ -714,7 +714,6 @@
 	while (--i >= 0)
 		free_irq(info->irq[i], info);
 out:
-	kfree(info);
 	return ret;
 }
 
@@ -728,7 +727,6 @@
 	free_irq(info->irq[0], info);
 	for (i = 0; i < info->irq_nums; i++)
 		free_irq(info->irq[i], info);
-	kfree(info);
 	return 0;
 }
 
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index ffe02fb..0d0b5d7 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -340,6 +340,13 @@
 	  Say Y to include support for Summit Microelectronics SMB347
 	  Battery Charger.
 
+config CHARGER_TPS65090
+	tristate "TPS65090 battery charger driver"
+	depends on MFD_TPS65090
+	help
+	  Say Y here to enable support for battery charging with TPS65090
+	  PMIC chips.
+
 config AB8500_BM
 	bool "AB8500 Battery Management Driver"
 	depends on AB8500_CORE && AB8500_GPADC
@@ -353,13 +360,6 @@
 	  Say Y to enable support for the battery and AC power in the
 	  Goldfish emulator.
 
-config CHARGER_PM2301
-	bool "PM2301 Battery Charger Driver"
-	depends on AB8500_BM
-	help
-	  Say Y to include support for PM2301 charger driver.
-	  Depends on AB8500 battery management core.
-
 source "drivers/power/reset/Kconfig"
 
 endif # POWER_SUPPLY
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index 3f66436..653bf6c 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -39,7 +39,7 @@
 obj-$(CONFIG_BATTERY_JZ4740)	+= jz4740-battery.o
 obj-$(CONFIG_BATTERY_INTEL_MID)	+= intel_mid_battery.o
 obj-$(CONFIG_BATTERY_RX51)	+= rx51_battery.o
-obj-$(CONFIG_AB8500_BM)		+= ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o
+obj-$(CONFIG_AB8500_BM)		+= ab8500_bmdata.o ab8500_charger.o ab8500_fg.o ab8500_btemp.o abx500_chargalg.o pm2301_charger.o
 obj-$(CONFIG_CHARGER_ISP1704)	+= isp1704_charger.o
 obj-$(CONFIG_CHARGER_MAX8903)	+= max8903_charger.o
 obj-$(CONFIG_CHARGER_TWL4030)	+= twl4030_charger.o
@@ -47,10 +47,10 @@
 obj-$(CONFIG_CHARGER_LP8788)	+= lp8788-charger.o
 obj-$(CONFIG_CHARGER_GPIO)	+= gpio-charger.o
 obj-$(CONFIG_CHARGER_MANAGER)	+= charger-manager.o
-obj-$(CONFIG_CHARGER_PM2301)	+= pm2301_charger.o
 obj-$(CONFIG_CHARGER_MAX8997)	+= max8997_charger.o
 obj-$(CONFIG_CHARGER_MAX8998)	+= max8998_charger.o
 obj-$(CONFIG_CHARGER_BQ2415X)	+= bq2415x_charger.o
 obj-$(CONFIG_POWER_AVS)		+= avs/
 obj-$(CONFIG_CHARGER_SMB347)	+= smb347-charger.o
+obj-$(CONFIG_CHARGER_TPS65090)	+= tps65090-charger.o
 obj-$(CONFIG_POWER_RESET)	+= reset/
diff --git a/drivers/power/ab8500_bmdata.c b/drivers/power/ab8500_bmdata.c
index 7a96c06..d298645 100644
--- a/drivers/power/ab8500_bmdata.c
+++ b/drivers/power/ab8500_bmdata.c
@@ -11,7 +11,7 @@
  * Note that the res_to_temp table must be strictly sorted by falling resistance
  * values to work.
  */
-static struct abx500_res_to_temp temp_tbl_A_thermistor[] = {
+const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[] = {
 	{-5, 53407},
 	{ 0, 48594},
 	{ 5, 43804},
@@ -28,8 +28,12 @@
 	{60, 13437},
 	{65, 12500},
 };
+EXPORT_SYMBOL(ab8500_temp_tbl_a_thermistor);
 
-static struct abx500_res_to_temp temp_tbl_B_thermistor[] = {
+const int ab8500_temp_tbl_a_size = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor);
+EXPORT_SYMBOL(ab8500_temp_tbl_a_size);
+
+const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[] = {
 	{-5, 200000},
 	{ 0, 159024},
 	{ 5, 151921},
@@ -46,8 +50,12 @@
 	{60,  85461},
 	{65,  82869},
 };
+EXPORT_SYMBOL(ab8500_temp_tbl_b_thermistor);
 
-static struct abx500_v_to_cap cap_tbl_A_thermistor[] = {
+const int ab8500_temp_tbl_b_size = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor);
+EXPORT_SYMBOL(ab8500_temp_tbl_b_size);
+
+static const struct abx500_v_to_cap cap_tbl_a_thermistor[] = {
 	{4171,	100},
 	{4114,	 95},
 	{4009,	 83},
@@ -70,7 +78,7 @@
 	{3247,	  0},
 };
 
-static struct abx500_v_to_cap cap_tbl_B_thermistor[] = {
+static const struct abx500_v_to_cap cap_tbl_b_thermistor[] = {
 	{4161,	100},
 	{4124,	 98},
 	{4044,	 90},
@@ -93,7 +101,7 @@
 	{3250,	  0},
 };
 
-static struct abx500_v_to_cap cap_tbl[] = {
+static const struct abx500_v_to_cap cap_tbl[] = {
 	{4186,	100},
 	{4163,	 99},
 	{4114,	 95},
@@ -124,7 +132,7 @@
  * Note that the res_to_temp table must be strictly sorted by falling
  * resistance values to work.
  */
-static struct abx500_res_to_temp temp_tbl[] = {
+static const struct abx500_res_to_temp temp_tbl[] = {
 	{-5, 214834},
 	{ 0, 162943},
 	{ 5, 124820},
@@ -146,7 +154,7 @@
  * Note that the batres_vs_temp table must be strictly sorted by falling
  * temperature values to work.
  */
-static struct batres_vs_temp temp_to_batres_tbl_thermistor[] = {
+static const struct batres_vs_temp temp_to_batres_tbl_thermistor[] = {
 	{ 40, 120},
 	{ 30, 135},
 	{ 20, 165},
@@ -160,7 +168,7 @@
  * Note that the batres_vs_temp table must be strictly sorted by falling
  * temperature values to work.
  */
-static struct batres_vs_temp temp_to_batres_tbl_ext_thermistor[] = {
+static const struct batres_vs_temp temp_to_batres_tbl_ext_thermistor[] = {
 	{ 60, 300},
 	{ 30, 300},
 	{ 20, 300},
@@ -171,7 +179,7 @@
 };
 
 /* battery resistance table for LI ION 9100 battery */
-static struct batres_vs_temp temp_to_batres_tbl_9100[] = {
+static const struct batres_vs_temp temp_to_batres_tbl_9100[] = {
 	{ 60, 180},
 	{ 30, 180},
 	{ 20, 180},
@@ -230,10 +238,10 @@
 		.maint_b_chg_timer_h = 200,
 		.low_high_cur_lvl = 300,
 		.low_high_vol_lvl = 4000,
-		.n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_A_thermistor),
-		.r_to_t_tbl = temp_tbl_A_thermistor,
-		.n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_A_thermistor),
-		.v_to_cap_tbl = cap_tbl_A_thermistor,
+		.n_temp_tbl_elements = ARRAY_SIZE(ab8500_temp_tbl_a_thermistor),
+		.r_to_t_tbl = ab8500_temp_tbl_a_thermistor,
+		.n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_a_thermistor),
+		.v_to_cap_tbl = cap_tbl_a_thermistor,
 		.n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
 		.batres_tbl = temp_to_batres_tbl_thermistor,
 
@@ -258,10 +266,10 @@
 		.maint_b_chg_timer_h = 200,
 		.low_high_cur_lvl = 300,
 		.low_high_vol_lvl = 4000,
-		.n_temp_tbl_elements = ARRAY_SIZE(temp_tbl_B_thermistor),
-		.r_to_t_tbl = temp_tbl_B_thermistor,
-		.n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_B_thermistor),
-		.v_to_cap_tbl = cap_tbl_B_thermistor,
+		.n_temp_tbl_elements = ARRAY_SIZE(ab8500_temp_tbl_b_thermistor),
+		.r_to_t_tbl = ab8500_temp_tbl_b_thermistor,
+		.n_v_cap_tbl_elements = ARRAY_SIZE(cap_tbl_b_thermistor),
+		.v_to_cap_tbl = cap_tbl_b_thermistor,
 		.n_batres_tbl_elements = ARRAY_SIZE(temp_to_batres_tbl_thermistor),
 		.batres_tbl = temp_to_batres_tbl_thermistor,
 	},
@@ -407,15 +415,27 @@
 	.battok_raising_th_sel1 = 2860,
 	.maint_thres = 95,
 	.user_cap_limit = 15,
+	.pcut_enable = 1,
+	.pcut_max_time = 127,
+	.pcut_flag_time = 112,
+	.pcut_max_restart = 15,
+	.pcut_debounce_time = 2,
 };
 
-static const struct abx500_maxim_parameters maxi_params = {
+static const struct abx500_maxim_parameters ab8500_maxi_params = {
 	.ena_maxi = true,
 	.chg_curr = 910,
 	.wait_cycles = 10,
 	.charger_curr_step = 100,
 };
 
+static const struct abx500_maxim_parameters abx540_maxi_params = {
+	.ena_maxi = true,
+	.chg_curr = 3000,
+	.wait_cycles = 10,
+	.charger_curr_step = 200,
+};
+
 static const struct abx500_bm_charger_parameters chg = {
 	.usb_volt_max		= 5500,
 	.usb_curr_max		= 1500,
@@ -423,6 +443,46 @@
 	.ac_curr_max		= 1500,
 };
 
+/*
+ * These arrays map the raw register value to the charger output current
+ * (in mA) used by the AB8500 and AB8540
+ */
+static int ab8500_charge_output_curr_map[] = {
+	100,	200,	300,	400,	500,	600,	700,	800,
+	900,	1000,	1100,	1200,	1300,	1400,	1500,	1500,
+};
+
+static int ab8540_charge_output_curr_map[] = {
+	0,	0,	0,	75,	100,	125,	150,	175,
+	200,	225,	250,	275,	300,	325,	350,	375,
+	400,	425,	450,	475,	500,	525,	550,	575,
+	600,	625,	650,	675,	700,	725,	750,	775,
+	800,	825,	850,	875,	900,	925,	950,	975,
+	1000,	1025,	1050,	1075,	1100,	1125,	1150,	1175,
+	1200,	1225,	1250,	1275,	1300,	1325,	1350,	1375,
+	1400,	1425,	1450,	1500,	1600,	1700,	1900,	2000,
+};
+
+/*
+ * These arrays map the raw register value to the charger input current
+ * (in mA) used by the AB8500 and AB8540
+ */
+static int ab8500_charge_input_curr_map[] = {
+	50,	98,	193,	290,	380,	450,	500,	600,
+	700,	800,	900,	1000,	1100,	1300,	1400,	1500,
+};
+
+static int ab8540_charge_input_curr_map[] = {
+	25,	50,	75,	100,	125,	150,	175,	200,
+	225,	250,	275,	300,	325,	350,	375,	400,
+	425,	450,	475,	500,	525,	550,	575,	600,
+	625,	650,	675,	700,	725,	750,	775,	800,
+	825,	850,	875,	900,	925,	950,	975,	1000,
+	1025,	1050,	1075,	1100,	1125,	1150,	1175,	1200,
+	1225,	1250,	1275,	1300,	1325,	1350,	1375,	1400,
+	1425,	1450,	1475,	1500,	1500,	1500,	1500,	1500,
+};
+
 struct abx500_bm_data ab8500_bm_data = {
 	.temp_under             = 3,
 	.temp_low               = 8,
@@ -442,22 +502,60 @@
 	.fg_res                 = 100,
 	.cap_levels             = &cap_levels,
 	.bat_type               = bat_type_thermistor,
-	.n_btypes               = 3,
+	.n_btypes               = ARRAY_SIZE(bat_type_thermistor),
 	.batt_id                = 0,
 	.interval_charging      = 5,
 	.interval_not_charging  = 120,
 	.temp_hysteresis        = 3,
 	.gnd_lift_resistance    = 34,
-	.maxi                   = &maxi_params,
+	.chg_output_curr        = ab8500_charge_output_curr_map,
+	.n_chg_out_curr         = ARRAY_SIZE(ab8500_charge_output_curr_map),
+	.maxi                   = &ab8500_maxi_params,
 	.chg_params             = &chg,
 	.fg_params              = &fg,
+	.chg_input_curr         = ab8500_charge_input_curr_map,
+	.n_chg_in_curr          = ARRAY_SIZE(ab8500_charge_input_curr_map),
+};
+
+struct abx500_bm_data ab8540_bm_data = {
+	.temp_under             = 3,
+	.temp_low               = 8,
+	.temp_high              = 43,
+	.temp_over              = 48,
+	.main_safety_tmr_h      = 4,
+	.temp_interval_chg      = 20,
+	.temp_interval_nochg    = 120,
+	.usb_safety_tmr_h       = 4,
+	.bkup_bat_v             = BUP_VCH_SEL_2P6V,
+	.bkup_bat_i             = BUP_ICH_SEL_150UA,
+	.no_maintenance         = false,
+	.capacity_scaling       = false,
+	.adc_therm              = ABx500_ADC_THERM_BATCTRL,
+	.chg_unknown_bat        = false,
+	.enable_overshoot       = false,
+	.fg_res                 = 100,
+	.cap_levels             = &cap_levels,
+	.bat_type               = bat_type_thermistor,
+	.n_btypes               = ARRAY_SIZE(bat_type_thermistor),
+	.batt_id                = 0,
+	.interval_charging      = 5,
+	.interval_not_charging  = 120,
+	.temp_hysteresis        = 3,
+	.gnd_lift_resistance    = 0,
+	.maxi                   = &abx540_maxi_params,
+	.chg_params             = &chg,
+	.fg_params              = &fg,
+	.chg_output_curr        = ab8540_charge_output_curr_map,
+	.n_chg_out_curr         = ARRAY_SIZE(ab8540_charge_output_curr_map),
+	.chg_input_curr         = ab8540_charge_input_curr_map,
+	.n_chg_in_curr          = ARRAY_SIZE(ab8540_charge_input_curr_map),
 };
 
 int ab8500_bm_of_probe(struct device *dev,
 		       struct device_node *np,
 		       struct abx500_bm_data *bm)
 {
-	struct batres_vs_temp *tmp_batres_tbl;
+	const struct batres_vs_temp *tmp_batres_tbl;
 	struct device_node *battery_node;
 	const char *btech;
 	int i;
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index 0768906..d412d34 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -42,6 +42,9 @@
 #define BTEMP_BATCTRL_CURR_SRC_16UA	16
 #define BTEMP_BATCTRL_CURR_SRC_18UA	18
 
+#define BTEMP_BATCTRL_CURR_SRC_60UA	60
+#define BTEMP_BATCTRL_CURR_SRC_120UA	120
+
 #define to_ab8500_btemp_device_info(x) container_of((x), \
 	struct ab8500_btemp, btemp_psy);
 
@@ -76,8 +79,8 @@
  * @dev:		Pointer to the structure device
  * @node:		List of AB8500 BTEMPs, hence prepared for reentrance
  * @curr_source:	What current source we use, in uA
- * @bat_temp:		Battery temperature in degree Celcius
- * @prev_bat_temp	Last dispatched battery temperature
+ * @bat_temp:		Dispatched battery temperature in degrees Celsius
+ * @prev_bat_temp:	Last measured battery temperature in degrees Celsius
  * @parent:		Pointer to the struct ab8500
  * @gpadc:		Pointer to the struct gpadc
  * @fg:			Pointer to the struct fg
@@ -128,6 +131,7 @@
 
 	return btemp;
 }
+EXPORT_SYMBOL(ab8500_btemp_get);
 
 /**
  * ab8500_btemp_batctrl_volt_to_res() - convert batctrl voltage to resistance
@@ -155,7 +159,7 @@
 	if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL) {
 		/*
 		 * If the battery has internal NTC, we use the current
-		 * source to calculate the resistance, 7uA or 20uA
+		 * source to calculate the resistance.
 		 */
 		rbs = (v_batctrl * 1000
 		       - di->bm->gnd_lift_resistance * inst_curr)
@@ -216,7 +220,12 @@
 	/* Only do this for batteries with internal NTC */
 	if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && enable) {
 
-		if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+		if (is_ab8540(di->parent)) {
+			if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_60UA)
+				curr = BAT_CTRL_60U_ENA;
+			else
+				curr = BAT_CTRL_120U_ENA;
+		} else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
 			if (di->curr_source == BTEMP_BATCTRL_CURR_SRC_16UA)
 				curr = BAT_CTRL_16U_ENA;
 			else
@@ -257,7 +266,14 @@
 	} else if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL && !enable) {
 		dev_dbg(di->dev, "Disable BATCTRL curr source\n");
 
-		if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+		if (is_ab8540(di->parent)) {
+			/* Write 0 to the curr bits */
+			ret = abx500_mask_and_set_register_interruptible(
+				di->dev,
+				AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+				BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA,
+				~(BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA));
+		} else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
 			/* Write 0 to the curr bits */
 			ret = abx500_mask_and_set_register_interruptible(
 				di->dev,
@@ -314,7 +330,13 @@
 	 * if we got an error above
 	 */
 disable_curr_source:
-	if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+	if (is_ab8540(di->parent)) {
+		/* Write 0 to the curr bits */
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
+			BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA,
+			~(BAT_CTRL_60U_ENA | BAT_CTRL_120U_ENA));
+	} else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
 		/* Write 0 to the curr bits */
 		ret = abx500_mask_and_set_register_interruptible(di->dev,
 			AB8500_CHARGER, AB8500_BAT_CTRL_CURRENT_SOURCE,
@@ -541,7 +563,9 @@
 {
 	int res;
 	u8 i;
-	if (is_ab9540(di->parent) || is_ab8505(di->parent))
+	if (is_ab8540(di->parent))
+		di->curr_source = BTEMP_BATCTRL_CURR_SRC_60UA;
+	else if (is_ab9540(di->parent) || is_ab8505(di->parent))
 		di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
 	else
 		di->curr_source = BTEMP_BATCTRL_CURR_SRC_7UA;
@@ -579,12 +603,17 @@
 
 	/*
 	 * We only have to change current source if the
-	 * detected type is Type 1, else we use the 7uA source
+	 * detected type is Type 1.
 	 */
 	if (di->bm->adc_therm == ABx500_ADC_THERM_BATCTRL &&
-			di->bm->batt_id == 1) {
-		if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
-			dev_dbg(di->dev, "Set BATCTRL current source to 16uA\n");
+	    di->bm->batt_id == 1) {
+		if (is_ab8540(di->parent)) {
+			dev_dbg(di->dev,
+				"Set BATCTRL current source to 60uA\n");
+			di->curr_source = BTEMP_BATCTRL_CURR_SRC_60UA;
+		} else if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+			dev_dbg(di->dev,
+				"Set BATCTRL current source to 16uA\n");
 			di->curr_source = BTEMP_BATCTRL_CURR_SRC_16UA;
 		} else {
 			dev_dbg(di->dev, "Set BATCTRL current source to 20uA\n");
@@ -604,22 +633,37 @@
 static void ab8500_btemp_periodic_work(struct work_struct *work)
 {
 	int interval;
+	int bat_temp;
 	struct ab8500_btemp *di = container_of(work,
 		struct ab8500_btemp, btemp_periodic_work.work);
 
 	if (!di->initialized) {
-		di->initialized = true;
 		/* Identify the battery */
 		if (ab8500_btemp_id(di) < 0)
 			dev_warn(di->dev, "failed to identify the battery\n");
 	}
 
-	di->bat_temp = ab8500_btemp_measure_temp(di);
-
-	if (di->bat_temp != di->prev_bat_temp) {
-		di->prev_bat_temp = di->bat_temp;
+	bat_temp = ab8500_btemp_measure_temp(di);
+	/*
+	 * Filter battery temperature.
+	 * Allow direct updates of the temperature only if two consecutive
+	 * samples give the same value. Otherwise allow only a 1 degree change
+	 * from the previously reported value, in the direction of the new
+	 * measurement.
+	 */
+	if ((bat_temp == di->prev_bat_temp) || !di->initialized) {
+		if ((di->bat_temp != di->prev_bat_temp) || !di->initialized) {
+			di->initialized = true;
+			di->bat_temp = bat_temp;
+			power_supply_changed(&di->btemp_psy);
+		}
+	} else if (bat_temp < di->prev_bat_temp) {
+		di->bat_temp--;
+		power_supply_changed(&di->btemp_psy);
+	} else if (bat_temp > di->prev_bat_temp) {
+		di->bat_temp++;
 		power_supply_changed(&di->btemp_psy);
 	}
+	di->prev_bat_temp = bat_temp;
 
 	if (di->events.ac_conn || di->events.usb_conn)
 		interval = di->bm->temp_interval_chg;
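The filter introduced in ab8500_btemp_periodic_work() above only lets the dispatched temperature jump when two consecutive raw samples agree; otherwise it moves the reported value by at most one degree per period toward the new sample. A minimal stand-alone sketch of that rule, with illustrative names and ignoring the !initialized bootstrap case:

	/* Sketch only: the filtering rule used above. */
	static int btemp_filter(int reported, int prev_sample, int new_sample)
	{
		if (new_sample == prev_sample)	/* two samples agree: accept */
			return new_sample;
		if (new_sample < prev_sample)	/* else creep by one degree */
			return reported - 1;
		return reported + 1;
	}
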
@@ -772,7 +816,7 @@
  *
  * Returns battery temperature
  */
-static int ab8500_btemp_get_temp(struct ab8500_btemp *di)
+int ab8500_btemp_get_temp(struct ab8500_btemp *di)
 {
 	int temp = 0;
 
@@ -808,6 +852,7 @@
 	}
 	return temp;
 }
+EXPORT_SYMBOL(ab8500_btemp_get_temp);
 
 /**
  * ab8500_btemp_get_batctrl_temp() - get the temperature
@@ -819,6 +864,7 @@
 {
 	return btemp->bat_temp * 1000;
 }
+EXPORT_SYMBOL(ab8500_btemp_get_batctrl_temp);
 
 /**
  * ab8500_btemp_get_property() - get the btemp properties
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index 24b30b7..a558318 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -15,6 +15,7 @@
 #include <linux/device.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/notifier.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
@@ -52,10 +53,15 @@
 #define VBUS_DET_DBNC100		0x02
 #define VBUS_DET_DBNC1			0x01
 #define OTP_ENABLE_WD			0x01
+#define DROP_COUNT_RESET		0x01
+#define USB_CH_DET			0x01
 
 #define MAIN_CH_INPUT_CURR_SHIFT	4
 #define VBUS_IN_CURR_LIM_SHIFT		4
+#define AB8540_VBUS_IN_CURR_LIM_SHIFT	2
 #define AUTO_VBUS_IN_CURR_LIM_SHIFT	4
+#define AB8540_AUTO_VBUS_IN_CURR_MASK	0x3F
+#define VBUS_IN_CURR_LIM_RETRY_SET_TIME	30 /* seconds */
 
 #define LED_INDICATOR_PWM_ENA		0x01
 #define LED_INDICATOR_PWM_DIS		0x00
@@ -77,7 +83,9 @@
 
 /* UsbLineStatus register bit masks */
 #define AB8500_USB_LINK_STATUS		0x78
+#define AB8505_USB_LINK_STATUS		0xF8
 #define AB8500_STD_HOST_SUSP		0x18
+#define USB_LINK_STATUS_SHIFT		3
 
 /* Watchdog timeout constant */
 #define WD_TIMER			0x30 /* 4min */
@@ -96,6 +104,10 @@
 #define AB8500_SW_CONTROL_FALLBACK	0x03
 /* Wait for enumeration before charing in us */
 #define WAIT_ACA_RID_ENUMERATION	(5 * 1000)
+/* External charger control */
+#define AB8500_SYS_CHARGER_CONTROL_REG		0x52
+#define EXTERNAL_CHARGER_DISABLE_REG_VAL	0x03
+#define EXTERNAL_CHARGER_ENABLE_REG_VAL		0x07
 
 /* UsbLineStatus register - usb types */
 enum ab8500_charger_link_status {
@@ -196,10 +208,15 @@
 	spinlock_t usb_lock;
 };
 
+struct ab8500_charger_max_usb_in_curr {
+	int usb_type_max;
+	int set_max;
+	int calculated_max;
+};
+
 /**
  * struct ab8500_charger - ab8500 Charger device information
  * @dev:		Pointer to the structure device
- * @max_usb_in_curr:	Max USB charger input current
  * @vbus_detected:	VBUS detected
  * @vbus_detected_start:
  *			VBUS detected during startup
@@ -214,7 +231,6 @@
  * @autopower		Indicate if we should have automatic pwron after pwrloss
  * @autopower_cfg	platform specific power config support for "pwron after pwrloss"
  * @invalid_charger_detect_state State when forcing AB to use invalid charger
- * @is_usb_host:	Indicate if last detected USB type is host
  * @is_aca_rid:		Incicate if accessory is ACA type
  * @current_stepping_sessions:
  *			Counter for current stepping sessions
@@ -223,6 +239,7 @@
  * @bm:           	Platform specific battery management information
  * @flags:		Structure for information about events triggered
  * @usb_state:		Structure for usb stack information
+ * @max_usb_in_curr:	Max USB charger input current
  * @ac_chg:		AC charger power supply
  * @usb_chg:		USB charger power supply
  * @ac:			Structure that holds the AC charger properties
@@ -254,7 +271,6 @@
  */
 struct ab8500_charger {
 	struct device *dev;
-	int max_usb_in_curr;
 	bool vbus_detected;
 	bool vbus_detected_start;
 	bool ac_conn;
@@ -266,7 +282,6 @@
 	bool autopower;
 	bool autopower_cfg;
 	int invalid_charger_detect_state;
-	bool is_usb_host;
 	int is_aca_rid;
 	atomic_t current_stepping_sessions;
 	struct ab8500 *parent;
@@ -274,6 +289,7 @@
 	struct abx500_bm_data *bm;
 	struct ab8500_charger_event_flags flags;
 	struct ab8500_charger_usb_state usb_state;
+	struct ab8500_charger_max_usb_in_curr max_usb_in_curr;
 	struct ux500_charger ac_chg;
 	struct ux500_charger usb_chg;
 	struct ab8500_charger_info ac;
@@ -415,13 +431,18 @@
 	if (connected != di->usb.charger_connected) {
 		dev_dbg(di->dev, "USB connected:%i\n", connected);
 		di->usb.charger_connected = connected;
+
+		if (!connected)
+			di->flags.vbus_drop_end = false;
+
 		sysfs_notify(&di->usb_chg.psy.dev->kobj, NULL, "present");
 
 		if (connected) {
 			mutex_lock(&di->charger_attached_mutex);
 			mutex_unlock(&di->charger_attached_mutex);
 
-			queue_delayed_work(di->charger_wq,
+			if (is_ab8500(di->parent))
+				queue_delayed_work(di->charger_wq,
 					   &di->usb_charger_attached_work,
 					   HZ);
 		} else {
@@ -668,23 +689,19 @@
 	case USB_STAT_STD_HOST_C_S:
 		dev_dbg(di->dev, "USB Type - Standard host is "
 			"detected through USB driver\n");
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
-		di->is_usb_host = true;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		di->is_aca_rid = 0;
 		break;
 	case USB_STAT_HOST_CHG_HS_CHIRP:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
-		di->is_usb_host = true;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		di->is_aca_rid = 0;
 		break;
 	case USB_STAT_HOST_CHG_HS:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
-		di->is_usb_host = true;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		di->is_aca_rid = 0;
 		break;
 	case USB_STAT_ACA_RID_C_HS:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P9;
-		di->is_usb_host = false;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P9;
 		di->is_aca_rid = 0;
 		break;
 	case USB_STAT_ACA_RID_A:
@@ -693,8 +710,7 @@
 		 * can consume (900mA). Closest level is 500mA
 		 */
 		dev_dbg(di->dev, "USB_STAT_ACA_RID_A detected\n");
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
-		di->is_usb_host = false;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		di->is_aca_rid = 1;
 		break;
 	case USB_STAT_ACA_RID_B:
@@ -702,38 +718,35 @@
 		 * Dedicated charger level minus 120mA (20mA for ACA and
 		 * 100mA for potential accessory). Closest level is 1300mA
 		 */
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P3;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_1P3;
 		dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
-				di->max_usb_in_curr);
-		di->is_usb_host = false;
+				di->max_usb_in_curr.usb_type_max);
 		di->is_aca_rid = 1;
 		break;
 	case USB_STAT_HOST_CHG_NM:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
-		di->is_usb_host = true;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		di->is_aca_rid = 0;
 		break;
 	case USB_STAT_DEDICATED_CHG:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
-		di->is_usb_host = false;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_1P5;
 		di->is_aca_rid = 0;
 		break;
 	case USB_STAT_ACA_RID_C_HS_CHIRP:
 	case USB_STAT_ACA_RID_C_NM:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
-		di->is_usb_host = false;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_1P5;
 		di->is_aca_rid = 1;
 		break;
 	case USB_STAT_NOT_CONFIGURED:
 		if (di->vbus_detected) {
 			di->usb_device_is_unrecognised = true;
 			dev_dbg(di->dev, "USB Type - Legacy charger.\n");
-			di->max_usb_in_curr = USB_CH_IP_CUR_LVL_1P5;
+			di->max_usb_in_curr.usb_type_max =
+						USB_CH_IP_CUR_LVL_1P5;
 			break;
 		}
 	case USB_STAT_HM_IDGND:
 		dev_err(di->dev, "USB Type - Charging not allowed\n");
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
 		ret = -ENXIO;
 		break;
 	case USB_STAT_RESERVED:
@@ -743,12 +756,13 @@
 						"VBUS has collapsed\n");
 			ret = -ENXIO;
 			break;
-		}
-		if (is_ab9540(di->parent) || is_ab8505(di->parent)) {
+		} else {
 			dev_dbg(di->dev, "USB Type - Charging not allowed\n");
-			di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+			di->max_usb_in_curr.usb_type_max =
+						USB_CH_IP_CUR_LVL_0P05;
 			dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
-					link_status, di->max_usb_in_curr);
+				link_status,
+				di->max_usb_in_curr.usb_type_max);
 			ret = -ENXIO;
 			break;
 		}
@@ -757,23 +771,24 @@
 	case USB_STAT_CARKIT_2:
 	case USB_STAT_ACA_DOCK_CHARGER:
 	case USB_STAT_CHARGER_LINE_1:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d", link_status,
-				di->max_usb_in_curr);
+				di->max_usb_in_curr.usb_type_max);
 	case USB_STAT_NOT_VALID_LINK:
 		dev_err(di->dev, "USB Type invalid - try charging anyway\n");
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		break;
 
 	default:
 		dev_err(di->dev, "USB Type - Unknown\n");
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
 		ret = -ENXIO;
 		break;
 	};
 
+	di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
 	dev_dbg(di->dev, "USB Type - 0x%02x MaxCurr: %d",
-		link_status, di->max_usb_in_curr);
+		link_status, di->max_usb_in_curr.set_max);
 
 	return ret;
 }
@@ -796,21 +811,22 @@
 		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
 		return ret;
 	}
-	if (is_ab8500(di->parent)) {
+	if (is_ab8500(di->parent))
 		ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
-				AB8500_USB_LINE_STAT_REG, &val);
-	} else {
-		if (is_ab9540(di->parent) || is_ab8505(di->parent))
-			ret = abx500_get_register_interruptible(di->dev,
-				AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
-	}
+			AB8500_USB_LINE_STAT_REG, &val);
+	else
+		ret = abx500_get_register_interruptible(di->dev,
+			AB8500_USB, AB8500_USB_LINK1_STAT_REG, &val);
 	if (ret < 0) {
 		dev_err(di->dev, "%s ab8500 read failed\n", __func__);
 		return ret;
 	}
 
 	/* get the USB type */
-	val = (val & AB8500_USB_LINK_STATUS) >> 3;
+	if (is_ab8500(di->parent))
+		val = (val & AB8500_USB_LINK_STATUS) >> USB_LINK_STATUS_SHIFT;
+	else
+		val = (val & AB8505_USB_LINK_STATUS) >> USB_LINK_STATUS_SHIFT;
 	ret = ab8500_charger_max_usb_curr(di,
 		(enum ab8500_charger_link_status) val);
 
@@ -865,7 +881,12 @@
 		 */
 
 		/* get the USB type */
-		val = (val & AB8500_USB_LINK_STATUS) >> 3;
+		if (is_ab8500(di->parent))
+			val = (val & AB8500_USB_LINK_STATUS) >>
+							USB_LINK_STATUS_SHIFT;
+		else
+			val = (val & AB8505_USB_LINK_STATUS) >>
+							USB_LINK_STATUS_SHIFT;
 		if (val)
 			break;
 	}
@@ -960,51 +981,6 @@
 	4600 ,
 };
 
-/*
- * This array maps the raw hex value to charger current used by the AB8500
- * Values taken from the UM0836
- */
-static int ab8500_charger_current_map[] = {
-	100 ,
-	200 ,
-	300 ,
-	400 ,
-	500 ,
-	600 ,
-	700 ,
-	800 ,
-	900 ,
-	1000 ,
-	1100 ,
-	1200 ,
-	1300 ,
-	1400 ,
-	1500 ,
-};
-
-/*
- * This array maps the raw hex value to VBUS input current used by the AB8500
- * Values taken from the UM0836
- */
-static int ab8500_charger_vbus_in_curr_map[] = {
-	USB_CH_IP_CUR_LVL_0P05,
-	USB_CH_IP_CUR_LVL_0P09,
-	USB_CH_IP_CUR_LVL_0P19,
-	USB_CH_IP_CUR_LVL_0P29,
-	USB_CH_IP_CUR_LVL_0P38,
-	USB_CH_IP_CUR_LVL_0P45,
-	USB_CH_IP_CUR_LVL_0P5,
-	USB_CH_IP_CUR_LVL_0P6,
-	USB_CH_IP_CUR_LVL_0P7,
-	USB_CH_IP_CUR_LVL_0P8,
-	USB_CH_IP_CUR_LVL_0P9,
-	USB_CH_IP_CUR_LVL_1P0,
-	USB_CH_IP_CUR_LVL_1P1,
-	USB_CH_IP_CUR_LVL_1P3,
-	USB_CH_IP_CUR_LVL_1P4,
-	USB_CH_IP_CUR_LVL_1P5,
-};
-
 static int ab8500_voltage_to_regval(int voltage)
 {
 	int i;
@@ -1026,41 +1002,41 @@
 		return -1;
 }
 
-static int ab8500_current_to_regval(int curr)
+static int ab8500_current_to_regval(struct ab8500_charger *di, int curr)
 {
 	int i;
 
-	if (curr < ab8500_charger_current_map[0])
+	if (curr < di->bm->chg_output_curr[0])
 		return 0;
 
-	for (i = 0; i < ARRAY_SIZE(ab8500_charger_current_map); i++) {
-		if (curr < ab8500_charger_current_map[i])
+	for (i = 0; i < di->bm->n_chg_out_curr; i++) {
+		if (curr < di->bm->chg_output_curr[i])
 			return i - 1;
 	}
 
 	/* If not last element, return error */
-	i = ARRAY_SIZE(ab8500_charger_current_map) - 1;
-	if (curr == ab8500_charger_current_map[i])
+	i = di->bm->n_chg_out_curr - 1;
+	if (curr == di->bm->chg_output_curr[i])
 		return i;
 	else
 		return -1;
 }
 
-static int ab8500_vbus_in_curr_to_regval(int curr)
+static int ab8500_vbus_in_curr_to_regval(struct ab8500_charger *di, int curr)
 {
 	int i;
 
-	if (curr < ab8500_charger_vbus_in_curr_map[0])
+	if (curr < di->bm->chg_input_curr[0])
 		return 0;
 
-	for (i = 0; i < ARRAY_SIZE(ab8500_charger_vbus_in_curr_map); i++) {
-		if (curr < ab8500_charger_vbus_in_curr_map[i])
+	for (i = 0; i < di->bm->n_chg_in_curr; i++) {
+		if (curr < di->bm->chg_input_curr[i])
 			return i - 1;
 	}
 
 	/* If not last element, return error */
-	i = ARRAY_SIZE(ab8500_charger_vbus_in_curr_map) - 1;
-	if (curr == ab8500_charger_vbus_in_curr_map[i])
+	i = di->bm->n_chg_in_curr - 1;
+	if (curr == di->bm->chg_input_curr[i])
 		return i;
 	else
 		return -1;
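Both lookups above share one pattern: scan the platform-supplied table until the requested current would be exceeded and return the previous index, clamping to index 0 below the first entry and returning -1 above the last one. A stand-alone sketch of the idea (the helper name is illustrative, not part of the driver):

	/* Sketch only: generic form of the two table lookups above. */
	static int curr_to_index(const int *tbl, int n, int curr)
	{
		int i;

		if (curr < tbl[0])
			return 0;
		for (i = 0; i < n; i++)
			if (curr < tbl[i])
				return i - 1;
		return (curr == tbl[n - 1]) ? n - 1 : -1;
	}

With the AB8500 output-current table, for example, a request of 450 mA resolves to index 3 (400 mA), while anything above the last entry yields -1.
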
@@ -1077,28 +1053,48 @@
  */
 static int ab8500_charger_get_usb_cur(struct ab8500_charger *di)
 {
+	int ret = 0;
 	switch (di->usb_state.usb_current) {
 	case 100:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P09;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P09;
 		break;
 	case 200:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P19;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P19;
 		break;
 	case 300:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P29;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P29;
 		break;
 	case 400:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P38;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P38;
 		break;
 	case 500:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P5;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P5;
 		break;
 	default:
-		di->max_usb_in_curr = USB_CH_IP_CUR_LVL_0P05;
-		return -1;
+		di->max_usb_in_curr.usb_type_max = USB_CH_IP_CUR_LVL_0P05;
+		ret = -EPERM;
 		break;
 	};
-	return 0;
+	di->max_usb_in_curr.set_max = di->max_usb_in_curr.usb_type_max;
+	return ret;
+}
+
+/**
+ * ab8500_charger_check_continue_stepping() - Check to allow stepping
+ * @di:		pointer to the ab8500_charger structure
+ * @reg:	select what charger register to check
+ *
+ * Check if current stepping should be allowed to continue.
+ * Checks if charger source has not collapsed. If it has, further stepping
+ * is not allowed.
+ */
+static bool ab8500_charger_check_continue_stepping(struct ab8500_charger *di,
+						   int reg)
+{
+	if (reg == AB8500_USBCH_IPT_CRNTLVL_REG)
+		return !di->flags.vbus_drop_end;
+	else
+		return true;
 }
 
 /**
@@ -1118,7 +1114,7 @@
 	int ich, int reg)
 {
 	int ret = 0;
-	int auto_curr_index, curr_index, prev_curr_index, shift_value, i;
+	int curr_index, prev_curr_index, shift_value, i;
 	u8 reg_value;
 	u32 step_udelay;
 	bool no_stepping = false;
@@ -1136,39 +1132,27 @@
 	case AB8500_MCH_IPT_CURLVL_REG:
 		shift_value = MAIN_CH_INPUT_CURR_SHIFT;
 		prev_curr_index = (reg_value >> shift_value);
-		curr_index = ab8500_current_to_regval(ich);
+		curr_index = ab8500_current_to_regval(di, ich);
 		step_udelay = STEP_UDELAY;
 		if (!di->ac.charger_connected)
 			no_stepping = true;
 		break;
 	case AB8500_USBCH_IPT_CRNTLVL_REG:
-		shift_value = VBUS_IN_CURR_LIM_SHIFT;
+		if (is_ab8540(di->parent))
+			shift_value = AB8540_VBUS_IN_CURR_LIM_SHIFT;
+		else
+			shift_value = VBUS_IN_CURR_LIM_SHIFT;
 		prev_curr_index = (reg_value >> shift_value);
-		curr_index = ab8500_vbus_in_curr_to_regval(ich);
+		curr_index = ab8500_vbus_in_curr_to_regval(di, ich);
 		step_udelay = STEP_UDELAY * 100;
 
-		ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
-					AB8500_CH_USBCH_STAT2_REG, &reg_value);
-		if (ret < 0) {
-			dev_err(di->dev, "%s read failed\n", __func__);
-			goto exit_set_current;
-		}
-		auto_curr_index =
-			reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT;
-
-		dev_dbg(di->dev, "%s Auto VBUS curr is %d mA\n",
-			__func__,
-			ab8500_charger_vbus_in_curr_map[auto_curr_index]);
-
-		prev_curr_index = min(prev_curr_index, auto_curr_index);
-
 		if (!di->usb.charger_connected)
 			no_stepping = true;
 		break;
 	case AB8500_CH_OPT_CRNTLVL_REG:
 		shift_value = 0;
 		prev_curr_index = (reg_value >> shift_value);
-		curr_index = ab8500_current_to_regval(ich);
+		curr_index = ab8500_current_to_regval(di, ich);
 		step_udelay = STEP_UDELAY;
 		if (curr_index && (curr_index - prev_curr_index) > 1)
 			step_udelay *= 100;
@@ -1219,7 +1203,8 @@
 				usleep_range(step_udelay, step_udelay * 2);
 		}
 	} else {
-		for (i = prev_curr_index + 1; i <= curr_index; i++) {
+		bool allow = true;
+		for (i = prev_curr_index + 1; i <= curr_index && allow; i++) {
 			dev_dbg(di->dev, "curr change_2 to: %x for 0x%02x\n",
 				(u8)i << shift_value, reg);
 			ret = abx500_set_register_interruptible(di->dev,
@@ -1230,6 +1215,8 @@
 			}
 			if (i != curr_index)
 				usleep_range(step_udelay, step_udelay * 2);
+
+			allow = ab8500_charger_check_continue_stepping(di, reg);
 		}
 	}
 
@@ -1255,6 +1242,11 @@
 
 	/* We should always use to lowest current limit */
 	min_value = min(di->bm->chg_params->usb_curr_max, ich_in);
+	if (di->max_usb_in_curr.set_max > 0)
+		min_value = min(di->max_usb_in_curr.set_max, min_value);
+
+	if (di->usb_state.usb_current >= 0)
+		min_value = min(di->usb_state.usb_current, min_value);
 
 	switch (min_value) {
 	case 100:
@@ -1400,8 +1392,8 @@
 
 		/* Check if the requested voltage or current is valid */
 		volt_index = ab8500_voltage_to_regval(vset);
-		curr_index = ab8500_current_to_regval(iset);
-		input_curr_index = ab8500_current_to_regval(
+		curr_index = ab8500_current_to_regval(di, iset);
+		input_curr_index = ab8500_current_to_regval(di,
 			di->bm->chg_params->ac_curr_max);
 		if (volt_index < 0 || curr_index < 0 || input_curr_index < 0) {
 			dev_err(di->dev,
@@ -1572,7 +1564,7 @@
 
 		/* Check if the requested voltage or current is valid */
 		volt_index = ab8500_voltage_to_regval(vset);
-		curr_index = ab8500_current_to_regval(ich_out);
+		curr_index = ab8500_current_to_regval(di, ich_out);
 		if (volt_index < 0 || curr_index < 0) {
 			dev_err(di->dev,
 				"Charger voltage or current too high, "
@@ -1609,7 +1601,8 @@
 		di->usb.charger_online = 1;
 
 		/* USBChInputCurr: current that can be drawn from the usb */
-		ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+		ret = ab8500_charger_set_vbus_in_curr(di,
+					di->max_usb_in_curr.usb_type_max);
 		if (ret) {
 			dev_err(di->dev, "setting USBChInputCurr failed\n");
 			return ret;
@@ -1668,8 +1661,7 @@
 		dev_dbg(di->dev, "%s Disabled USB charging\n", __func__);
 
 		/* Cancel any pending Vbat check work */
-		if (delayed_work_pending(&di->check_vbat_work))
-			cancel_delayed_work(&di->check_vbat_work);
+		cancel_delayed_work(&di->check_vbat_work);
 
 	}
 	ab8500_power_supply_changed(di, &di->usb_chg.psy);
@@ -1677,6 +1669,128 @@
 	return ret;
 }
 
+static int ab8500_external_charger_prepare(struct notifier_block *charger_nb,
+				unsigned long event, void *data)
+{
+	int ret;
+	struct device *dev = data;
+	/* Toggle the external charger control pin */
+	ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
+				  AB8500_SYS_CHARGER_CONTROL_REG,
+				  EXTERNAL_CHARGER_DISABLE_REG_VAL);
+	if (ret < 0) {
+		dev_err(dev, "write reg failed %d\n", ret);
+		goto out;
+	}
+	ret = abx500_set_register_interruptible(dev, AB8500_SYS_CTRL1_BLOCK,
+				  AB8500_SYS_CHARGER_CONTROL_REG,
+				  EXTERNAL_CHARGER_ENABLE_REG_VAL);
+	if (ret < 0)
+		dev_err(dev, "Write reg failed %d\n", ret);
+
+out:
+	return ret;
+}
+
+/**
+ * ab8500_charger_usb_check_enable() - enable usb charging
+ * @charger:	pointer to the ux500_charger structure
+ * @vset:	charging voltage
+ * @iset:	charger output current
+ *
+ * Check if the VBUS charger has been disconnected and reconnected without
+ * AB8500 raising an interrupt. Returns 0 on success.
+ */
+static int ab8500_charger_usb_check_enable(struct ux500_charger *charger,
+	int vset, int iset)
+{
+	u8 usbch_ctrl1 = 0;
+	int ret = 0;
+
+	struct ab8500_charger *di = to_ab8500_charger_usb_device_info(charger);
+
+	if (!di->usb.charger_connected)
+		return ret;
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+				AB8500_USBCH_CTRL1_REG, &usbch_ctrl1);
+	if (ret < 0) {
+		dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
+		return ret;
+	}
+	dev_dbg(di->dev, "USB charger ctrl: 0x%02x\n", usbch_ctrl1);
+
+	if (!(usbch_ctrl1 & USB_CH_ENA)) {
+		dev_info(di->dev, "Charging has been disabled abnormally and will be re-enabled\n");
+
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+					AB8500_CHARGER, AB8500_CHARGER_CTRL,
+					DROP_COUNT_RESET, DROP_COUNT_RESET);
+		if (ret < 0) {
+			dev_err(di->dev, "ab8500 write failed %d\n", __LINE__);
+			return ret;
+		}
+
+		ret = ab8500_charger_usb_en(&di->usb_chg, true, vset, iset);
+		if (ret < 0) {
+			dev_err(di->dev, "Failed to enable VBUS charger %d\n",
+					__LINE__);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+/**
+ * ab8500_charger_ac_check_enable() - enable ac charging
+ * @charger:	pointer to the ux500_charger structure
+ * @vset:	charging voltage
+ * @iset:	charger output current
+ *
+ * Check if the AC charger has been disconnected and reconnected without
+ * AB8500 raising an interrupt. Returns 0 on success.
+ */
+static int ab8500_charger_ac_check_enable(struct ux500_charger *charger,
+	int vset, int iset)
+{
+	u8 mainch_ctrl1 = 0;
+	int ret = 0;
+
+	struct ab8500_charger *di = to_ab8500_charger_ac_device_info(charger);
+
+	if (!di->ac.charger_connected)
+		return ret;
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+				AB8500_MCH_CTRL1, &mainch_ctrl1);
+	if (ret < 0) {
+		dev_err(di->dev, "ab8500 read failed %d\n", __LINE__);
+		return ret;
+	}
+	dev_dbg(di->dev, "AC charger ctrl: 0x%02x\n", mainch_ctrl1);
+
+	if (!(mainch_ctrl1 & MAIN_CH_ENA)) {
+		dev_info(di->dev, "Charging has been disabled abnormally and will be re-enabled\n");
+
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+					AB8500_CHARGER, AB8500_CHARGER_CTRL,
+					DROP_COUNT_RESET, DROP_COUNT_RESET);
+
+		if (ret < 0) {
+			dev_err(di->dev, "ab8500 write failed %d\n", __LINE__);
+			return ret;
+		}
+
+		ret = ab8500_charger_ac_en(&di->ac_chg, true, vset, iset);
+		if (ret < 0) {
+			dev_err(di->dev, "failed to enable AC charger %d\n",
+				__LINE__);
+			return ret;
+		}
+	}
+	return ret;
+}
+
 /**
  * ab8500_charger_watchdog_kick() - kick charger watchdog
  * @di:		pointer to the ab8500_charger structure
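ab8500_external_charger_prepare() above is a notifier callback; the probe changes further down register it on charger_notifier_list whenever the internal AC charger is not used, so an external charger driver can ask the AB8500 to toggle its charger-control pin before taking over. A sketch of how a producer might fire that chain, assuming charger_notifier_list is the shared blocking notifier head these drivers use (the wrapper name is illustrative):

	#include <linux/notifier.h>

	extern struct blocking_notifier_head charger_notifier_list;

	/* Sketch only: runs ab8500_external_charger_prepare() along with
	 * any other callbacks registered on the chain.
	 */
	static void notify_external_charger_prepare(struct device *ab8500_dev)
	{
		blocking_notifier_call_chain(&charger_notifier_list, 0,
					     ab8500_dev);
	}
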
@@ -1734,8 +1848,68 @@
 
 	/* Reset the main and usb drop input current measurement counter */
 	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
-				AB8500_CHARGER_CTRL,
-				0x1);
+				AB8500_CHARGER_CTRL, DROP_COUNT_RESET);
+	if (ret) {
+		dev_err(di->dev, "%s write failed\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+/**
+ * ab8540_charger_power_path_enable() - enable usb power path mode
+ * @charger:	pointer to the ux500_charger structure
+ * @enable:	enable/disable flag
+ *
+ * Enable or disable the power path for usb mode
+ * Returns an error code in case of failure, else 0 (on success)
+ */
+static int ab8540_charger_power_path_enable(struct ux500_charger *charger,
+		bool enable)
+{
+	int ret;
+	struct ab8500_charger *di;
+
+	if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+		di = to_ab8500_charger_usb_device_info(charger);
+	else
+		return -ENXIO;
+
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+				AB8500_CHARGER, AB8540_USB_PP_MODE_REG,
+				BUS_POWER_PATH_MODE_ENA, enable);
+	if (ret) {
+		dev_err(di->dev, "%s write failed\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+
+
+/**
+ * ab8540_charger_usb_pre_chg_enable() - enable usb pre-charge
+ * @charger:	pointer to the ux500_charger structure
+ * @enable:	enable/disable flag
+ *
+ * Enable or disable the pre-charge for usb mode
+ * Returns an error code in case of failure, else 0 (on success)
+ */
+static int ab8540_charger_usb_pre_chg_enable(struct ux500_charger *charger,
+		bool enable)
+{
+	int ret;
+	struct ab8500_charger *di;
+
+	if (charger->psy.type == POWER_SUPPLY_TYPE_USB)
+		di = to_ab8500_charger_usb_device_info(charger);
+	else
+		return -ENXIO;
+
+	ret = abx500_mask_and_set_register_interruptible(di->dev,
+				AB8500_CHARGER, AB8540_USB_PP_CHR_REG,
+				BUS_POWER_PATH_PRECHG_ENA, enable);
 	if (ret) {
 		dev_err(di->dev, "%s write failed\n", __func__);
 		return ret;
@@ -1823,9 +1997,10 @@
 		di->vbat > VBAT_TRESH_IP_CUR_RED))) {
 
 		dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d,"
-			" old: %d\n", di->max_usb_in_curr, di->vbat,
-			di->old_vbat);
-		ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+			" old: %d\n", di->max_usb_in_curr.usb_type_max,
+			di->vbat, di->old_vbat);
+		ab8500_charger_set_vbus_in_curr(di,
+					di->max_usb_in_curr.usb_type_max);
 		power_supply_changed(&di->usb_chg.psy);
 	}
 
@@ -2105,7 +2280,8 @@
 
 	/* Update maximum input current if USB enumeration is not detected */
 	if (!di->usb.charger_online) {
-		ret = ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+		ret = ab8500_charger_set_vbus_in_curr(di,
+					di->max_usb_in_curr.usb_type_max);
 		if (ret)
 			return;
 	}
@@ -2125,6 +2301,7 @@
 	int detected_chargers;
 	int ret;
 	u8 val;
+	u8 link_status;
 
 	struct ab8500_charger *di = container_of(work,
 		struct ab8500_charger, usb_link_status_work);
@@ -2144,38 +2321,61 @@
 	 * to start the charging process. but by jumping
 	 * thru a few hoops it can be forced to start.
 	 */
-	ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
-			AB8500_USB_LINE_STAT_REG, &val);
+	if (is_ab8500(di->parent))
+		ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+					AB8500_USB_LINE_STAT_REG, &val);
+	else
+		ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
+					AB8500_USB_LINK1_STAT_REG, &val);
+
 	if (ret >= 0)
 		dev_dbg(di->dev, "UsbLineStatus register = 0x%02x\n", val);
 	else
 		dev_dbg(di->dev, "Error reading USB link status\n");
 
+	if (is_ab8500(di->parent))
+		link_status = AB8500_USB_LINK_STATUS;
+	else
+		link_status = AB8505_USB_LINK_STATUS;
+
 	if (detected_chargers & USB_PW_CONN) {
-		if (((val & AB8500_USB_LINK_STATUS) >> 3) == USB_STAT_NOT_VALID_LINK &&
+		if (((val & link_status) >> USB_LINK_STATUS_SHIFT) ==
+				USB_STAT_NOT_VALID_LINK &&
 				di->invalid_charger_detect_state == 0) {
-			dev_dbg(di->dev, "Invalid charger detected, state= 0\n");
+			dev_dbg(di->dev,
+					"Invalid charger detected, state= 0\n");
 			/*Enable charger*/
 			abx500_mask_and_set_register_interruptible(di->dev,
-					AB8500_CHARGER, AB8500_USBCH_CTRL1_REG, 0x01, 0x01);
+					AB8500_CHARGER, AB8500_USBCH_CTRL1_REG,
+					USB_CH_ENA, USB_CH_ENA);
 			/*Enable charger detection*/
-			abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
-					AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x01);
+			abx500_mask_and_set_register_interruptible(di->dev,
+					AB8500_USB, AB8500_USB_LINE_CTRL2_REG,
+					USB_CH_DET, USB_CH_DET);
 			di->invalid_charger_detect_state = 1;
 			/*exit and wait for new link status interrupt.*/
 			return;
 
 		}
 		if (di->invalid_charger_detect_state == 1) {
-			dev_dbg(di->dev, "Invalid charger detected, state= 1\n");
+			dev_dbg(di->dev,
+					"Invalid charger detected, state= 1\n");
 			/*Stop charger detection*/
-			abx500_mask_and_set_register_interruptible(di->dev, AB8500_USB,
-					AB8500_MCH_IPT_CURLVL_REG, 0x01, 0x00);
+			abx500_mask_and_set_register_interruptible(di->dev,
+					AB8500_USB, AB8500_USB_LINE_CTRL2_REG,
+					USB_CH_DET, 0x00);
 			/*Check link status*/
-			ret = abx500_get_register_interruptible(di->dev, AB8500_USB,
-					AB8500_USB_LINE_STAT_REG, &val);
+			if (is_ab8500(di->parent))
+				ret = abx500_get_register_interruptible(di->dev,
+					AB8500_USB, AB8500_USB_LINE_STAT_REG,
+					&val);
+			else
+				ret = abx500_get_register_interruptible(di->dev,
+					AB8500_USB, AB8500_USB_LINK1_STAT_REG,
+					&val);
+
 			dev_dbg(di->dev, "USB link status= 0x%02x\n",
-					(val & AB8500_USB_LINK_STATUS) >> 3);
+				(val & link_status) >> USB_LINK_STATUS_SHIFT);
 			di->invalid_charger_detect_state = 2;
 		}
 	} else {
@@ -2273,7 +2473,7 @@
 		if (!ab8500_charger_get_usb_cur(di)) {
 			/* Update maximum input current */
 			ret = ab8500_charger_set_vbus_in_curr(di,
-					di->max_usb_in_curr);
+					di->max_usb_in_curr.usb_type_max);
 			if (ret)
 				return;
 
@@ -2422,7 +2622,9 @@
 
 	mutex_lock(&di->charger_attached_mutex);
 	mutex_unlock(&di->charger_attached_mutex);
-	queue_delayed_work(di->charger_wq,
+
+	if (is_ab8500(di->parent))
+		queue_delayed_work(di->charger_wq,
 			   &di->ac_charger_attached_work,
 			   HZ);
 	return IRQ_HANDLED;
@@ -2491,6 +2693,8 @@
 {
 	struct ab8500_charger *di = container_of(work,
 		struct ab8500_charger, vbus_drop_end_work.work);
+	int ret, curr;
+	u8 reg_value;
 
 	di->flags.vbus_drop_end = false;
 
@@ -2498,8 +2702,45 @@
 	abx500_set_register_interruptible(di->dev,
 				  AB8500_CHARGER, AB8500_CHARGER_CTRL, 0x01);
 
+	if (is_ab8540(di->parent))
+		ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+				AB8540_CH_USBCH_STAT3_REG, &reg_value);
+	else
+		ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+				AB8500_CH_USBCH_STAT2_REG, &reg_value);
+	if (ret < 0) {
+		dev_err(di->dev, "%s read failed\n", __func__);
+		return;
+	}
+
+	if (is_ab8540(di->parent))
+		curr = di->bm->chg_input_curr[
+			reg_value & AB8540_AUTO_VBUS_IN_CURR_MASK];
+	else
+		curr = di->bm->chg_input_curr[
+			reg_value >> AUTO_VBUS_IN_CURR_LIM_SHIFT];
+
+	if (di->max_usb_in_curr.calculated_max != curr) {
+		/* USB source is collapsing */
+		di->max_usb_in_curr.calculated_max = curr;
+		dev_dbg(di->dev,
+			 "VBUS input current limiting to %d mA\n",
+			 di->max_usb_in_curr.calculated_max);
+	} else {
+		/*
+		 * USB source can not give more than this amount.
+		 * Taking more will collapse the source.
+		 */
+		di->max_usb_in_curr.set_max =
+			di->max_usb_in_curr.calculated_max;
+		dev_dbg(di->dev,
+			 "VBUS input current limited to %d mA\n",
+			 di->max_usb_in_curr.set_max);
+	}
+
 	if (di->usb.charger_connected)
-		ab8500_charger_set_vbus_in_curr(di, di->max_usb_in_curr);
+		ab8500_charger_set_vbus_in_curr(di,
+					di->max_usb_in_curr.usb_type_max);
 }
 
 /**
@@ -2654,8 +2895,13 @@
 
 	dev_dbg(di->dev, "VBUS charger drop ended\n");
 	di->flags.vbus_drop_end = true;
+
+	/*
+	 * VBUS might have dropped due to bad connection.
+	 * Schedule a new input limit set to the value SW requests.
+	 */
 	queue_delayed_work(di->charger_wq, &di->vbus_drop_end_work,
-			   round_jiffies(30 * HZ));
+			   round_jiffies(VBUS_IN_CURR_LIM_RETRY_SET_TIME * HZ));
 
 	return IRQ_HANDLED;
 }
@@ -2836,6 +3082,7 @@
 static int ab8500_charger_init_hw_registers(struct ab8500_charger *di)
 {
 	int ret = 0;
+	u8 bup_vch_range = 0, vbup33_vrtcn = 0;
 
 	/* Setup maximum charger current and voltage for ABB cut2.0 */
 	if (!is_ab8500_1p1_or_earlier(di->parent)) {
@@ -2848,9 +3095,14 @@
 			goto out;
 		}
 
-		ret = abx500_set_register_interruptible(di->dev,
-			AB8500_CHARGER,
-			AB8500_CH_OPT_CRNTLVL_MAX_REG, CH_OP_CUR_LVL_1P6);
+		if (is_ab8540(di->parent))
+			ret = abx500_set_register_interruptible(di->dev,
+				AB8500_CHARGER, AB8500_CH_OPT_CRNTLVL_MAX_REG,
+				CH_OP_CUR_LVL_2P);
+		else
+			ret = abx500_set_register_interruptible(di->dev,
+				AB8500_CHARGER, AB8500_CH_OPT_CRNTLVL_MAX_REG,
+				CH_OP_CUR_LVL_1P6);
 		if (ret) {
 			dev_err(di->dev,
 				"failed to set CH_OPT_CRNTLVL_MAX_REG\n");
@@ -2858,7 +3110,8 @@
 		}
 	}
 
-	if (is_ab9540_2p0(di->parent) || is_ab8505_2p0(di->parent))
+	if (is_ab9540_2p0(di->parent) || is_ab9540_3p0(di->parent) ||
+	    is_ab8505_2p0(di->parent) || is_ab8540(di->parent))
 		ret = abx500_mask_and_set_register_interruptible(di->dev,
 			AB8500_CHARGER,
 			AB8500_USBCH_CTRL2_REG,
@@ -2930,14 +3183,6 @@
 		goto out;
 	}
 
-	/* Set charger watchdog timeout */
-	ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER,
-		AB8500_CH_WD_TIMER_REG, WD_TIMER);
-	if (ret) {
-		dev_err(di->dev, "failed to set charger watchdog timeout\n");
-		goto out;
-	}
-
 	ret = ab8500_charger_led_en(di, false);
 	if (ret < 0) {
 		dev_err(di->dev, "failed to disable LED\n");
@@ -2945,15 +3190,30 @@
 	}
 
 	/* Backup battery voltage and current */
+	if (di->bm->bkup_bat_v > BUP_VCH_SEL_3P1V)
+		bup_vch_range = BUP_VCH_RANGE;
+	if (di->bm->bkup_bat_v == BUP_VCH_SEL_3P3V)
+		vbup33_vrtcn = VBUP33_VRTCN;
+
 	ret = abx500_set_register_interruptible(di->dev,
 		AB8500_RTC,
 		AB8500_RTC_BACKUP_CHG_REG,
-		di->bm->bkup_bat_v |
-		di->bm->bkup_bat_i);
+		(di->bm->bkup_bat_v & 0x3) | di->bm->bkup_bat_i);
 	if (ret) {
 		dev_err(di->dev, "failed to setup backup battery charging\n");
 		goto out;
 	}
+	if (is_ab8540(di->parent)) {
+		ret = abx500_set_register_interruptible(di->dev,
+			AB8500_RTC,
+			AB8500_RTC_CTRL1_REG,
+			bup_vch_range | vbup33_vrtcn);
+		if (ret) {
+			dev_err(di->dev,
+				"failed to setup backup battery charging\n");
+			goto out;
+		}
+	}
 
 	/* Enable backup battery charging */
 	abx500_mask_and_set_register_interruptible(di->dev,
@@ -2962,6 +3222,25 @@
 	if (ret < 0)
 		dev_err(di->dev, "%s mask and set failed\n", __func__);
 
+	if (is_ab8540(di->parent)) {
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8540_USB_PP_MODE_REG,
+			BUS_VSYS_VOL_SELECT_MASK, BUS_VSYS_VOL_SELECT_3P6V);
+		if (ret) {
+			dev_err(di->dev,
+				"failed to setup usb power path vsys voltage\n");
+			goto out;
+		}
+		ret = abx500_mask_and_set_register_interruptible(di->dev,
+			AB8500_CHARGER, AB8540_USB_PP_CHR_REG,
+			BUS_PP_PRECHG_CURRENT_MASK, 0);
+		if (ret) {
+			dev_err(di->dev,
+				"failed to setup usb power path precharge current\n");
+			goto out;
+		}
+	}
+
 out:
 	return ret;
 }
@@ -3055,11 +3334,8 @@
 			dev_err(di->dev, "Failed to kick WD!\n");
 
 		/* If not already pending start a new timer */
-		if (!delayed_work_pending(
-			&di->kick_wd_work)) {
-			queue_delayed_work(di->charger_wq, &di->kick_wd_work,
-				round_jiffies(WD_KICK_INTERVAL));
-		}
+		queue_delayed_work(di->charger_wq, &di->kick_wd_work,
+				   round_jiffies(WD_KICK_INTERVAL));
 	}
 
 	/* If we still have a HW failure, schedule a new check */
@@ -3079,12 +3355,9 @@
 {
 	struct ab8500_charger *di = platform_get_drvdata(pdev);
 
-	/* Cancel any pending HW failure check */
-	if (delayed_work_pending(&di->check_hw_failure_work))
-		cancel_delayed_work(&di->check_hw_failure_work);
-
-	if (delayed_work_pending(&di->vbus_drop_end_work))
-		cancel_delayed_work(&di->vbus_drop_end_work);
+	/* Cancel any pending jobs */
+	cancel_delayed_work(&di->check_hw_failure_work);
+	cancel_delayed_work(&di->vbus_drop_end_work);
 
 	flush_delayed_work(&di->attach_work);
 	flush_delayed_work(&di->usb_charger_attached_work);
@@ -3107,6 +3380,10 @@
 #define ab8500_charger_resume       NULL
 #endif
 
+static struct notifier_block charger_nb = {
+	.notifier_call = ab8500_external_charger_prepare,
+};
+
 static int ab8500_charger_remove(struct platform_device *pdev)
 {
 	struct ab8500_charger *di = platform_get_drvdata(pdev);
@@ -3136,13 +3413,18 @@
 	/* Delete the work queue */
 	destroy_workqueue(di->charger_wq);
 
+	/* Unregister external charger enable notifier */
+	if (!di->ac_chg.enabled)
+		blocking_notifier_chain_unregister(
+			&charger_notifier_list, &charger_nb);
+
 	flush_scheduled_work();
-	if(di->usb_chg.enabled)
+	if (di->usb_chg.enabled)
 		power_supply_unregister(&di->usb_chg.psy);
-#if !defined(CONFIG_CHARGER_PM2301)
-	if(di->ac_chg.enabled)
+
+	if (di->ac_chg.enabled && !di->ac_chg.external)
 		power_supply_unregister(&di->ac_chg.psy);
-#endif
+
 	platform_set_drvdata(pdev, NULL);
 
 	return 0;
@@ -3206,16 +3488,22 @@
 	di->ac_chg.psy.num_supplicants = ARRAY_SIZE(supply_interface),
 	/* ux500_charger sub-class */
 	di->ac_chg.ops.enable = &ab8500_charger_ac_en;
+	di->ac_chg.ops.check_enable = &ab8500_charger_ac_check_enable;
 	di->ac_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
 	di->ac_chg.ops.update_curr = &ab8500_charger_update_charger_current;
 	di->ac_chg.max_out_volt = ab8500_charger_voltage_map[
 		ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
-	di->ac_chg.max_out_curr = ab8500_charger_current_map[
-		ARRAY_SIZE(ab8500_charger_current_map) - 1];
+	di->ac_chg.max_out_curr =
+		di->bm->chg_output_curr[di->bm->n_chg_out_curr - 1];
 	di->ac_chg.wdt_refresh = CHG_WD_INTERVAL;
 	di->ac_chg.enabled = di->bm->ac_enabled;
 	di->ac_chg.external = false;
 
+	/*notifier for external charger enabling*/
+	/* Notifier for external charger enabling */
+		blocking_notifier_chain_register(
+			&charger_notifier_list, &charger_nb);
+
 	/* USB supply */
 	/* power_supply base class */
 	di->usb_chg.psy.name = "ab8500_usb";
@@ -3227,15 +3515,20 @@
 	di->usb_chg.psy.num_supplicants = ARRAY_SIZE(supply_interface),
 	/* ux500_charger sub-class */
 	di->usb_chg.ops.enable = &ab8500_charger_usb_en;
+	di->usb_chg.ops.check_enable = &ab8500_charger_usb_check_enable;
 	di->usb_chg.ops.kick_wd = &ab8500_charger_watchdog_kick;
 	di->usb_chg.ops.update_curr = &ab8500_charger_update_charger_current;
+	di->usb_chg.ops.pp_enable = &ab8540_charger_power_path_enable;
+	di->usb_chg.ops.pre_chg_enable = &ab8540_charger_usb_pre_chg_enable;
 	di->usb_chg.max_out_volt = ab8500_charger_voltage_map[
 		ARRAY_SIZE(ab8500_charger_voltage_map) - 1];
-	di->usb_chg.max_out_curr = ab8500_charger_current_map[
-		ARRAY_SIZE(ab8500_charger_current_map) - 1];
+	di->usb_chg.max_out_curr =
+		di->bm->chg_output_curr[di->bm->n_chg_out_curr - 1];
 	di->usb_chg.wdt_refresh = CHG_WD_INTERVAL;
 	di->usb_chg.enabled = di->bm->usb_enabled;
 	di->usb_chg.external = false;
+	di->usb_chg.power_path = di->bm->usb_power_path;
+	di->usb_state.usb_current = -1;
 
 	/* Create a work queue for the charger */
 	di->charger_wq =
@@ -3316,7 +3609,7 @@
 	}
 
 	/* Register AC charger class */
-	if(di->ac_chg.enabled) {
+	if (di->ac_chg.enabled) {
 		ret = power_supply_register(di->dev, &di->ac_chg.psy);
 		if (ret) {
 			dev_err(di->dev, "failed to register AC charger\n");
@@ -3325,7 +3618,7 @@
 	}
 
 	/* Register USB charger class */
-	if(di->usb_chg.enabled) {
+	if (di->usb_chg.enabled) {
 		ret = power_supply_register(di->dev, &di->usb_chg.psy);
 		if (ret) {
 			dev_err(di->dev, "failed to register USB charger\n");
@@ -3385,14 +3678,16 @@
 	ch_stat = ab8500_charger_detect_chargers(di, false);
 
 	if ((ch_stat & AC_PW_CONN) == AC_PW_CONN) {
-		queue_delayed_work(di->charger_wq,
-				   &di->ac_charger_attached_work,
-				   HZ);
+		if (is_ab8500(di->parent))
+			queue_delayed_work(di->charger_wq,
+					   &di->ac_charger_attached_work,
+					   HZ);
 	}
 	if ((ch_stat & USB_PW_CONN) == USB_PW_CONN) {
-		queue_delayed_work(di->charger_wq,
-				   &di->usb_charger_attached_work,
-				   HZ);
+		if (is_ab8500(di->parent))
+			queue_delayed_work(di->charger_wq,
+					   &di->usb_charger_attached_work,
+					   HZ);
 	}
 
 	mutex_unlock(&di->charger_attached_mutex);
@@ -3410,10 +3705,10 @@
 put_usb_phy:
 	usb_put_phy(di->usb_phy);
 free_usb:
-	if(di->usb_chg.enabled)
+	if (di->usb_chg.enabled)
 		power_supply_unregister(&di->usb_chg.psy);
 free_ac:
-	if(di->ac_chg.enabled)
+	if (di->ac_chg.enabled)
 		power_supply_unregister(&di->ac_chg.psy);
 free_charger_wq:
 	destroy_workqueue(di->charger_wq);
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 25dae4c..c5391f5 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -36,7 +36,7 @@
 
 #define MILLI_TO_MICRO			1000
 #define FG_LSB_IN_MA			1627
-#define QLSB_NANO_AMP_HOURS_X10		1129
+#define QLSB_NANO_AMP_HOURS_X10		1071
 #define INS_CURR_TIMEOUT		(3 * HZ)
 
 #define SEC_TO_SAMPLE(S)		(S * 4)
@@ -672,11 +672,11 @@
 	/*
 	 * Convert to unit value in mA
 	 * Full scale input voltage is
-	 * 66.660mV => LSB = 66.660mV/(4096*res) = 1.627mA
+	 * 63.160mV => LSB = 63.160mV/(4096*res) = 1.542mA
 	 * Given a 250ms conversion cycle time the LSB corresponds
-	 * to 112.9 nAh. Convert to current by dividing by the conversion
+	 * to 107.1 nAh. Convert to current by dividing by the conversion
 	 * time in hours (250ms = 1 / (3600 * 4)h)
-	 * 112.9nAh assumes 10mOhm, but fg_res is in 0.1mOhm
+	 * 107.1nAh assumes 10mOhm, but fg_res is in 0.1mOhm
 	 */
 	val = (val * QLSB_NANO_AMP_HOURS_X10 * 36 * 4) /
 		(1000 * di->bm->fg_res);
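The constants above tie together as follows: one coulomb-counter LSB is 107.1 nAh at 10 mohm (QLSB_NANO_AMP_HOURS_X10 / 10), a 250 ms conversion period is 1/(3600 * 4) h, and fg_res is expressed in 0.1 mohm, so the scaling reduces to the expression in the code. A quick worked check of the arithmetic (numbers only, not driver code):

	counts * 107.1 nAh * 14400 1/h * (100 / fg_res) / 10^6
	  = counts * 1071 * 36 * 4 / (1000 * fg_res)   [mA]

	e.g. counts = 100, fg_res = 100 (10 mohm):
	  100 * 1071 * 144 / 100000 = 154 mA, i.e. about 1.54 mA per count,
	  matching the 1.542 mA LSB quoted in the comment.
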
@@ -863,7 +863,7 @@
 static int ab8500_fg_volt_to_capacity(struct ab8500_fg *di, int voltage)
 {
 	int i, tbl_size;
-	struct abx500_v_to_cap *tbl;
+	const struct abx500_v_to_cap *tbl;
 	int cap = 0;
 
 	tbl = di->bm->bat_type[di->bm->batt_id].v_to_cap_tbl,
@@ -915,7 +915,7 @@
 static int ab8500_fg_battery_resistance(struct ab8500_fg *di)
 {
 	int i, tbl_size;
-	struct batres_vs_temp *tbl;
+	const struct batres_vs_temp *tbl;
 	int resist = 0;
 
 	tbl = di->bm->bat_type[di->bm->batt_id].batres_tbl;
@@ -1354,9 +1354,6 @@
 			 * algorithm says.
 			 */
 			di->bat_cap.prev_percent = 1;
-			di->bat_cap.permille = 1;
-			di->bat_cap.prev_mah = 1;
-			di->bat_cap.mah = 1;
 			percent = 1;
 
 			changed = true;
@@ -1683,7 +1680,6 @@
 		break;
 
 	case AB8500_FG_DISCHARGE_WAKEUP:
-		ab8500_fg_coulomb_counter(di, true);
 		ab8500_fg_calc_cap_discharge_voltage(di, true);
 
 		di->fg_samples = SEC_TO_SAMPLE(
@@ -1768,9 +1764,10 @@
 			ab8500_fg_algorithm_discharging(di);
 	}
 
-	dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d "
+	dev_dbg(di->dev, "[FG_DATA] %d %d %d %d %d %d %d %d %d %d "
 		"%d %d %d %d %d %d %d\n",
 		di->bat_cap.max_mah_design,
+		di->bat_cap.max_mah,
 		di->bat_cap.mah,
 		di->bat_cap.permille,
 		di->bat_cap.level,
@@ -1982,7 +1979,7 @@
 }
 
 /**
- * ab8500_fg_cc_data_end_handler() - isr to get battery avg current.
+ * ab8500_fg_cc_data_end_handler() - end of data conversion isr.
  * @irq:       interrupt number
  * @_di:       pointer to the ab8500_fg structure
  *
@@ -2002,7 +1999,7 @@
 }
 
 /**
- * ab8500_fg_cc_convend_handler() - isr to get battery avg current.
+ * ab8500_fg_cc_int_calib_handler() - end of calibration isr.
  * @irq:       interrupt number
  * @_di:       pointer to the ab8500_fg structure
  *
@@ -2153,9 +2150,7 @@
 			val->intval = di->bat_cap.prev_mah;
 		break;
 	case POWER_SUPPLY_PROP_CAPACITY:
-		if (di->bm->capacity_scaling)
-			val->intval = di->bat_cap.cap_scale.scaled_cap;
-		else if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
+		if (di->flags.batt_unknown && !di->bm->chg_unknown_bat &&
 				di->flags.batt_id_received)
 			val->intval = 100;
 		else
@@ -2344,6 +2339,50 @@
 		dev_err(di->dev, "BattOk init write failed.\n");
 		goto out;
 	}
+
+	if (((is_ab8505(di->parent) || is_ab9540(di->parent)) &&
+			abx500_get_chip_id(di->dev) >= AB8500_CUT2P0)
+			|| is_ab8540(di->parent)) {
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8505_RTC_PCUT_MAX_TIME_REG, di->bm->fg_params->pcut_max_time);
+
+		if (ret) {
+			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_MAX_TIME_REG\n", __func__);
+			goto out;
+		}
+
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8505_RTC_PCUT_FLAG_TIME_REG, di->bm->fg_params->pcut_flag_time);
+
+		if (ret) {
+			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_FLAG_TIME_REG\n", __func__);
+			goto out;
+		}
+
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8505_RTC_PCUT_RESTART_REG, di->bm->fg_params->pcut_max_restart);
+
+		if (ret) {
+			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_RESTART_REG\n", __func__);
+			goto out;
+		}
+
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8505_RTC_PCUT_DEBOUNCE_REG, di->bm->fg_params->pcut_debounce_time);
+
+		if (ret) {
+			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_DEBOUNCE_REG\n", __func__);
+			goto out;
+		}
+
+		ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+			AB8505_RTC_PCUT_CTL_STATUS_REG, di->bm->fg_params->pcut_enable);
+
+		if (ret) {
+			dev_err(di->dev, "%s write failed AB8505_RTC_PCUT_CTL_STATUS_REG\n", __func__);
+			goto out;
+		}
+	}
 out:
 	return ret;
 }
@@ -2546,6 +2585,428 @@
 
 	return ret;
 }
+
+static ssize_t ab8505_powercut_flagtime_read(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+		AB8505_RTC_PCUT_FLAG_TIME_REG, &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_FLAG_TIME_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7F));
+
+fail:
+	return ret;
+}
+
+static ssize_t ab8505_powercut_flagtime_write(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	int ret;
+	unsigned long reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	reg_value = simple_strtoul(buf, NULL, 10);
+
+	if (reg_value > 0x7F) {
+		dev_err(dev, "Incorrect parameter, echo 0 (1.98s) - 127 (15.625ms) for flagtime\n");
+		goto fail;
+	}
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+		AB8505_RTC_PCUT_FLAG_TIME_REG, (u8)reg_value);
+
+	if (ret < 0)
+		dev_err(dev, "Failed to set AB8505_RTC_PCUT_FLAG_TIME_REG\n");
+
+fail:
+	return count;
+}
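The store hook above (and the similar ones that follow) parses with simple_strtoul() and only range-checks the result, so non-numeric input is silently treated as 0. For comparison, a sketch of the same bounds check done with kstrtou8(), which also reports parse errors (the helper name and error choices are illustrative, not part of the patch):

	/* Sketch only: parse and range-check a powercut register value. */
	static int pcut_parse(const char *buf, u8 max, u8 *out)
	{
		u8 val;
		int ret;

		ret = kstrtou8(buf, 10, &val);
		if (ret)
			return ret;
		if (val > max)
			return -ERANGE;
		*out = val;
		return 0;
	}
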
+
+static ssize_t ab8505_powercut_maxtime_read(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+		AB8505_RTC_PCUT_MAX_TIME_REG, &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_MAX_TIME_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7F));
+
+fail:
+	return ret;
+
+}
+
+static ssize_t ab8505_powercut_maxtime_write(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	int ret;
+	int reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	reg_value = simple_strtoul(buf, NULL, 10);
+	if (reg_value > 0x7F) {
+		dev_err(dev, "Incorrect parameter, echo 0 (0.0s) - 127 (1.98s) for maxtime\n");
+		goto fail;
+	}
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+		AB8505_RTC_PCUT_MAX_TIME_REG, (u8)reg_value);
+
+	if (ret < 0)
+		dev_err(dev, "Failed to set AB8505_RTC_PCUT_MAX_TIME_REG\n");
+
+fail:
+	return count;
+}
+
+static ssize_t ab8505_powercut_restart_read(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+		AB8505_RTC_PCUT_RESTART_REG, &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_RESTART_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0xF));
+
+fail:
+	return ret;
+}
+
+static ssize_t ab8505_powercut_restart_write(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf, size_t count)
+{
+	int ret;
+	int reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	reg_value = simple_strtoul(buf, NULL, 10);
+	if (reg_value > 0xF) {
+		dev_err(dev, "Incorrect parameter, echo 0 - 15 for number of restart\n");
+		goto fail;
+	}
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_RESTART_REG, (u8)reg_value);
+
+	if (ret < 0)
+		dev_err(dev, "Failed to set AB8505_RTC_PCUT_RESTART_REG\n");
+
+fail:
+	return count;
+
+}
+
+static ssize_t ab8505_powercut_timer_read(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_TIME_REG, &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_TIME_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7F));
+
+fail:
+	return ret;
+}
+
+static ssize_t ab8505_powercut_restart_counter_read(struct device *dev,
+						    struct device_attribute *attr,
+						    char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_RESTART_REG, &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_RESTART_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0xF0) >> 4);
+
+fail:
+	return ret;
+}
+
+static ssize_t ab8505_powercut_read(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_CTL_STATUS_REG, &reg_value);
+
+	if (ret < 0)
+		goto fail;
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x1));
+
+fail:
+	return ret;
+}
+
+static ssize_t ab8505_powercut_write(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	int ret;
+	int reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	reg_value = simple_strtoul(buf, NULL, 10);
+	if (reg_value > 0x1) {
+		dev_err(dev, "Incorrect parameter, echo 0/1 to disable/enable Pcut feature\n");
+		goto fail;
+	}
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_CTL_STATUS_REG, (u8)reg_value);
+
+	if (ret < 0)
+		dev_err(dev, "Failed to set AB8505_RTC_PCUT_CTL_STATUS_REG\n");
+
+fail:
+	return count;
+}
+
+static ssize_t ab8505_powercut_flag_read(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_CTL_STATUS_REG,  &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_CTL_STATUS_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ((reg_value & 0x10) >> 4));
+
+fail:
+	return ret;
+}
+
+static ssize_t ab8505_powercut_debounce_read(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_DEBOUNCE_REG,  &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_DEBOUNCE_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7));
+
+fail:
+	return ret;
+}
+
+static ssize_t ab8505_powercut_debounce_write(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t count)
+{
+	int ret;
+	int reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	reg_value = simple_strtoul(buf, NULL, 10);
+	if (reg_value > 0x7) {
+		dev_err(dev, "Incorrect parameter, echo 0 to 7 for debounce setting\n");
+		goto fail;
+	}
+
+	ret = abx500_set_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_DEBOUNCE_REG, (u8)reg_value);
+
+	if (ret < 0)
+		dev_err(dev, "Failed to set AB8505_RTC_PCUT_DEBOUNCE_REG\n");
+
+fail:
+	return count;
+}
+
+static ssize_t ab8505_powercut_enable_status_read(struct device *dev,
+						  struct device_attribute *attr,
+						  char *buf)
+{
+	int ret;
+	u8 reg_value;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	ret = abx500_get_register_interruptible(di->dev, AB8500_RTC,
+						AB8505_RTC_PCUT_CTL_STATUS_REG, &reg_value);
+
+	if (ret < 0) {
+		dev_err(dev, "Failed to read AB8505_RTC_PCUT_CTL_STATUS_REG\n");
+		goto fail;
+	}
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", ((reg_value & 0x20) >> 5));
+
+fail:
+	return ret;
+}
+
+static struct device_attribute ab8505_fg_sysfs_psy_attrs[] = {
+	__ATTR(powercut_flagtime, (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8505_powercut_flagtime_read, ab8505_powercut_flagtime_write),
+	__ATTR(powercut_maxtime, (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8505_powercut_maxtime_read, ab8505_powercut_maxtime_write),
+	__ATTR(powercut_restart_max, (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8505_powercut_restart_read, ab8505_powercut_restart_write),
+	__ATTR(powercut_timer, S_IRUGO, ab8505_powercut_timer_read, NULL),
+	__ATTR(powercut_restart_counter, S_IRUGO,
+		ab8505_powercut_restart_counter_read, NULL),
+	__ATTR(powercut_enable, (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8505_powercut_read, ab8505_powercut_write),
+	__ATTR(powercut_flag, S_IRUGO, ab8505_powercut_flag_read, NULL),
+	__ATTR(powercut_debounce_time, (S_IRUGO | S_IWUSR | S_IWGRP),
+		ab8505_powercut_debounce_read, ab8505_powercut_debounce_write),
+	__ATTR(powercut_enable_status, S_IRUGO,
+		ab8505_powercut_enable_status_read, NULL),
+};
+
+static int ab8500_fg_sysfs_psy_create_attrs(struct device *dev)
+{
+	unsigned int j;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	if (((is_ab8505(di->parent) || is_ab9540(di->parent)) &&
+	     abx500_get_chip_id(dev->parent) >= AB8500_CUT2P0)
+	    || is_ab8540(di->parent)) {
+		for (j = 0; j < ARRAY_SIZE(ab8505_fg_sysfs_psy_attrs); j++)
+			if (device_create_file(dev, &ab8505_fg_sysfs_psy_attrs[j]))
+				goto sysfs_psy_create_attrs_failed_ab8505;
+	}
+	return 0;
+sysfs_psy_create_attrs_failed_ab8505:
+	dev_err(dev, "Failed creating sysfs psy attrs for ab8505.\n");
+	while (j--)
+		device_remove_file(dev, &ab8505_fg_sysfs_psy_attrs[j]);
+
+	return -EIO;
+}
+
+static void ab8500_fg_sysfs_psy_remove_attrs(struct device *dev)
+{
+	unsigned int i;
+	struct power_supply *psy = dev_get_drvdata(dev);
+	struct ab8500_fg *di;
+
+	di = to_ab8500_fg_device_info(psy);
+
+	if (((is_ab8505(di->parent) || is_ab9540(di->parent)) &&
+	     abx500_get_chip_id(dev->parent) >= AB8500_CUT2P0)
+	    || is_ab8540(di->parent)) {
+		for (i = 0; i < ARRAY_SIZE(ab8505_fg_sysfs_psy_attrs); i++)
+			(void)device_remove_file(dev, &ab8505_fg_sysfs_psy_attrs[i]);
+	}
+}
+
 /* Exposure to the sysfs interface <<END>> */
 
 #if defined(CONFIG_PM)
@@ -2607,6 +3068,7 @@
 	ab8500_fg_sysfs_exit(di);
 
 	flush_scheduled_work();
+	ab8500_fg_sysfs_psy_remove_attrs(di->fg_psy.dev);
 	power_supply_unregister(&di->fg_psy);
 	platform_set_drvdata(pdev, NULL);
 	return ret;
@@ -2772,6 +3234,13 @@
 		goto free_irq;
 	}
 
+	ret = ab8500_fg_sysfs_psy_create_attrs(di->fg_psy.dev);
+	if (ret) {
+		dev_err(di->dev, "failed to create FG psy sysfs entries\n");
+		ab8500_fg_sysfs_exit(di);
+		goto free_irq;
+	}
+
 	/* Calibrate the fg first time */
 	di->flags.calibrate = true;
 	di->calib_state = AB8500_FG_CALIB_INIT;
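
For reference, the attribute registration added above follows the usual create-then-unwind sysfs pattern, which only works if the unwind loop reuses the same index the create loop stopped at (hence the single index variable j above). A minimal sketch under illustrative names -- example_attrs, example_show and example_create_attrs are assumptions, not part of this patch:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 0);	/* placeholder value */
}

static struct device_attribute example_attrs[] = {
	__ATTR(example_a, 0444, example_show, NULL),
	__ATTR(example_b, 0444, example_show, NULL),
};

static int example_create_attrs(struct device *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(example_attrs); i++)
		if (device_create_file(dev, &example_attrs[i]))
			goto unwind;
	return 0;

unwind:
	/* remove only the attributes that were actually created */
	while (i--)
		device_remove_file(dev, &example_attrs[i]);
	return -EIO;
}
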
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index f043c08..9863e42 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) ST-Ericsson SA 2012
+ * Copyright (c) 2012 Sony Mobile Communications AB
  *
  * Charging algorithm driver for abx500 variants
  *
@@ -8,11 +9,13 @@
  *	Johan Palsson <johan.palsson@stericsson.com>
  *	Karl Komierowski <karl.komierowski@stericsson.com>
  *	Arun R Murthy <arun.murthy@stericsson.com>
+ *	Author: Imre Sunyi <imre.sunyi@sonymobile.com>
  */
 
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/hrtimer.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -24,8 +27,10 @@
 #include <linux/of.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
 #include <linux/mfd/abx500/ux500_chargalg.h>
 #include <linux/mfd/abx500/ab8500-bm.h>
+#include <linux/notifier.h>
 
 /* Watchdog kick interval */
 #define CHG_WD_INTERVAL			(6 * HZ)
@@ -33,6 +38,18 @@
 /* End-of-charge criteria counter */
 #define EOC_COND_CNT			10
 
+/* One hour expressed in seconds */
+#define ONE_HOUR_IN_SECONDS            3600
+
+/* Five minutes expressed in seconds */
+#define FIVE_MINUTES_IN_SECONDS        300
+
+/* Plus margin for the low battery threshold */
+#define BAT_PLUS_MARGIN                (100)
+
+#define CHARGALG_CURR_STEP_LOW		0
+#define CHARGALG_CURR_STEP_HIGH	100
+
 #define to_abx500_chargalg_device_info(x) container_of((x), \
 	struct abx500_chargalg, chargalg_psy);
 
@@ -66,6 +83,11 @@
 	bool usb_suspended;
 };
 
+struct abx500_chargalg_current_step_status {
+	bool curr_step_change;
+	int curr_step;
+};
+
 struct abx500_chargalg_battery_data {
 	int temp;
 	int volt;
@@ -82,6 +104,7 @@
 	STATE_HW_TEMP_PROTECT_INIT,
 	STATE_HW_TEMP_PROTECT,
 	STATE_NORMAL_INIT,
+	STATE_USB_PP_PRE_CHARGE,
 	STATE_NORMAL,
 	STATE_WAIT_FOR_RECHARGE_INIT,
 	STATE_WAIT_FOR_RECHARGE,
@@ -113,6 +136,7 @@
 	"HW_TEMP_PROTECT_INIT",
 	"HW_TEMP_PROTECT",
 	"NORMAL_INIT",
+	"USB_PP_PRE_CHARGE",
 	"NORMAL",
 	"WAIT_FOR_RECHARGE_INIT",
 	"WAIT_FOR_RECHARGE",
@@ -204,6 +228,8 @@
  * @batt_data:		data of the battery
  * @susp_status:	current charger suspension status
  * @bm:           	Platform specific battery management information
+ * @curr_status:	Current step status for over-current protection
+ * @parent:		pointer to the parent struct ab8500
  * @chargalg_psy:	structure that holds the battery properties exposed by
  *			the charging algorithm
  * @events:		structure for information about events triggered
@@ -227,6 +253,8 @@
 	struct abx500_chargalg_charger_info chg_info;
 	struct abx500_chargalg_battery_data batt_data;
 	struct abx500_chargalg_suspension_status susp_status;
+	struct ab8500 *parent;
+	struct abx500_chargalg_current_step_status curr_status;
 	struct abx500_bm_data *bm;
 	struct power_supply chargalg_psy;
 	struct ux500_charger *ac_chg;
@@ -236,51 +264,69 @@
 	struct delayed_work chargalg_periodic_work;
 	struct delayed_work chargalg_wd_work;
 	struct work_struct chargalg_work;
-	struct timer_list safety_timer;
-	struct timer_list maintenance_timer;
+	struct hrtimer safety_timer;
+	struct hrtimer maintenance_timer;
 	struct kobject chargalg_kobject;
 };
 
+/* External charger prepare notifier */
+BLOCKING_NOTIFIER_HEAD(charger_notifier_list);
+
 /* Main battery properties */
 static enum power_supply_property abx500_chargalg_props[] = {
 	POWER_SUPPLY_PROP_STATUS,
 	POWER_SUPPLY_PROP_HEALTH,
 };
 
+struct abx500_chargalg_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct abx500_chargalg *, char *);
+	ssize_t (*store)(struct abx500_chargalg *, const char *, size_t);
+};
+
 /**
  * abx500_chargalg_safety_timer_expired() - Expiration of the safety timer
- * @data:	pointer to the abx500_chargalg structure
+ * @timer:     pointer to the hrtimer structure
  *
  * This function gets called when the safety timer for the charger
  * expires
  */
-static void abx500_chargalg_safety_timer_expired(unsigned long data)
+static enum hrtimer_restart
+abx500_chargalg_safety_timer_expired(struct hrtimer *timer)
 {
-	struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+	struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
+						  safety_timer);
 	dev_err(di->dev, "Safety timer expired\n");
 	di->events.safety_timer_expired = true;
 
 	/* Trigger execution of the algorithm instantly */
 	queue_work(di->chargalg_wq, &di->chargalg_work);
+
+	return HRTIMER_NORESTART;
 }
 
 /**
  * abx500_chargalg_maintenance_timer_expired() - Expiration of
  * the maintenance timer
- * @i:		pointer to the abx500_chargalg structure
+ * @timer:     pointer to the timer structure
  *
  * This function gets called when the maintenence timer
  * expires
  */
-static void abx500_chargalg_maintenance_timer_expired(unsigned long data)
+static enum hrtimer_restart
+abx500_chargalg_maintenance_timer_expired(struct hrtimer *timer)
 {
 
-	struct abx500_chargalg *di = (struct abx500_chargalg *) data;
+	struct abx500_chargalg *di = container_of(timer, struct abx500_chargalg,
+						  maintenance_timer);
+
 	dev_dbg(di->dev, "Maintenance timer expired\n");
 	di->events.maintenance_timer_expired = true;
 
 	/* Trigger execution of the algorithm instantly */
 	queue_work(di->chargalg_wq, &di->chargalg_work);
+
+	return HRTIMER_NORESTART;
 }
 
 /**
@@ -303,6 +349,30 @@
 	di->charge_state = state;
 }
 
+static int abx500_chargalg_check_charger_enable(struct abx500_chargalg *di)
+{
+	switch (di->charge_state) {
+	case STATE_NORMAL:
+	case STATE_MAINTENANCE_A:
+	case STATE_MAINTENANCE_B:
+		break;
+	default:
+		return 0;
+	}
+
+	if (di->chg_info.charger_type & USB_CHG) {
+		return di->usb_chg->ops.check_enable(di->usb_chg,
+			di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+			di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+	} else if ((di->chg_info.charger_type & AC_CHG) &&
+		   !(di->ac_chg->external)) {
+		return di->ac_chg->ops.check_enable(di->ac_chg,
+			di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
+			di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+	}
+	return 0;
+}
+
 /**
  * abx500_chargalg_check_charger_connection() - Check charger connection change
  * @di:		pointer to the abx500_chargalg structure
@@ -348,6 +418,22 @@
 }
 
 /**
+ * abx500_chargalg_check_current_step_status() - Check charging current
+ * step status.
+ * @di:		pointer to the abx500_chargalg structure
+ *
+ * This function will check if there is a change in the charging current step
+ * and change charge state accordingly.
+ */
+static void abx500_chargalg_check_current_step_status
+	(struct abx500_chargalg *di)
+{
+	if (di->curr_status.curr_step_change)
+		abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+	di->curr_status.curr_step_change = false;
+}
+
+/**
  * abx500_chargalg_start_safety_timer() - Start charging safety timer
  * @di:		pointer to the abx500_chargalg structure
  *
@@ -356,19 +442,16 @@
  */
 static void abx500_chargalg_start_safety_timer(struct abx500_chargalg *di)
 {
-	unsigned long timer_expiration = 0;
+	/* Charger-dependent expiration time in hours */
+	int timer_expiration = 0;
 
 	switch (di->chg_info.charger_type) {
 	case AC_CHG:
-		timer_expiration =
-		round_jiffies(jiffies +
-			(di->bm->main_safety_tmr_h * 3600 * HZ));
+		timer_expiration = di->bm->main_safety_tmr_h;
 		break;
 
 	case USB_CHG:
-		timer_expiration =
-		round_jiffies(jiffies +
-			(di->bm->usb_safety_tmr_h * 3600 * HZ));
+		timer_expiration = di->bm->usb_safety_tmr_h;
 		break;
 
 	default:
@@ -377,11 +460,10 @@
 	}
 
 	di->events.safety_timer_expired = false;
-	di->safety_timer.expires = timer_expiration;
-	if (!timer_pending(&di->safety_timer))
-		add_timer(&di->safety_timer);
-	else
-		mod_timer(&di->safety_timer, timer_expiration);
+	hrtimer_set_expires_range(&di->safety_timer,
+		ktime_set(timer_expiration * ONE_HOUR_IN_SECONDS, 0),
+		ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
+	hrtimer_start_expires(&di->safety_timer, HRTIMER_MODE_REL);
 }
 
 /**
@@ -392,8 +474,8 @@
  */
 static void abx500_chargalg_stop_safety_timer(struct abx500_chargalg *di)
 {
-	di->events.safety_timer_expired = false;
-	del_timer(&di->safety_timer);
+	if (hrtimer_try_to_cancel(&di->safety_timer) >= 0)
+		di->events.safety_timer_expired = false;
 }
 
 /**
@@ -408,17 +490,11 @@
 static void abx500_chargalg_start_maintenance_timer(struct abx500_chargalg *di,
 	int duration)
 {
-	unsigned long timer_expiration;
-
-	/* Convert from hours to jiffies */
-	timer_expiration = round_jiffies(jiffies + (duration * 3600 * HZ));
-
+	hrtimer_set_expires_range(&di->maintenance_timer,
+		ktime_set(duration * ONE_HOUR_IN_SECONDS, 0),
+		ktime_set(FIVE_MINUTES_IN_SECONDS, 0));
 	di->events.maintenance_timer_expired = false;
-	di->maintenance_timer.expires = timer_expiration;
-	if (!timer_pending(&di->maintenance_timer))
-		add_timer(&di->maintenance_timer);
-	else
-		mod_timer(&di->maintenance_timer, timer_expiration);
+	hrtimer_start_expires(&di->maintenance_timer, HRTIMER_MODE_REL);
 }
 
 /**
@@ -430,8 +506,8 @@
  */
 static void abx500_chargalg_stop_maintenance_timer(struct abx500_chargalg *di)
 {
-	di->events.maintenance_timer_expired = false;
-	del_timer(&di->maintenance_timer);
+	if (hrtimer_try_to_cancel(&di->maintenance_timer) >= 0)
+		di->events.maintenance_timer_expired = false;
 }
 
 /**
@@ -477,6 +553,8 @@
 static int abx500_chargalg_ac_en(struct abx500_chargalg *di, int enable,
 	int vset, int iset)
 {
+	static int abx500_chargalg_ex_ac_enable_toggle;
+
 	if (!di->ac_chg || !di->ac_chg->ops.enable)
 		return -ENXIO;
 
@@ -489,6 +567,14 @@
 	di->chg_info.ac_iset = iset;
 	di->chg_info.ac_vset = vset;
 
+	/* Enable external charger */
+	if (enable && di->ac_chg->external &&
+	    !abx500_chargalg_ex_ac_enable_toggle) {
+		blocking_notifier_call_chain(&charger_notifier_list,
+					     0, di->dev);
+		abx500_chargalg_ex_ac_enable_toggle++;
+	}
+
 	return di->ac_chg->ops.enable(di->ac_chg, enable, vset, iset);
 }
 
@@ -520,6 +606,37 @@
 	return di->usb_chg->ops.enable(di->usb_chg, enable, vset, iset);
 }
 
+/**
+ * ab8540_chargalg_usb_pp_en() - Enable/disable the USB power path
+ * @di:		pointer to the abx500_chargalg structure
+ * @enable:	power path enable/disable
+ *
+ * The USB power path will be enabled or disabled.
+ */
+static int ab8540_chargalg_usb_pp_en(struct abx500_chargalg *di, bool enable)
+{
+	if (!di->usb_chg || !di->usb_chg->ops.pp_enable)
+		return -ENXIO;
+
+	return di->usb_chg->ops.pp_enable(di->usb_chg, enable);
+}
+
+/**
+ * ab8540_chargalg_usb_pre_chg_en() - Enable/disable USB pre-charge
+ * @di:		pointer to the abx500_chargalg structure
+ * @enable:	USB pre-charge enable/disable
+ *
+ * The USB pre-charge will be enabled or disabled.
+ */
+static int ab8540_chargalg_usb_pre_chg_en(struct abx500_chargalg *di,
+					  bool enable)
+{
+	if (!di->usb_chg || !di->usb_chg->ops.pre_chg_enable)
+		return -ENXIO;
+
+	return di->usb_chg->ops.pre_chg_enable(di->usb_chg, enable);
+}
+
 /**
  * abx500_chargalg_update_chg_curr() - Update charger current
  * @di:		pointer to the abx500_chargalg structure
@@ -613,8 +730,6 @@
 static void abx500_chargalg_start_charging(struct abx500_chargalg *di,
 	int vset, int iset)
 {
-	bool start_chargalg_wd = true;
-
 	switch (di->chg_info.charger_type) {
 	case AC_CHG:
 		dev_dbg(di->dev,
@@ -632,12 +747,8 @@
 
 	default:
 		dev_err(di->dev, "Unknown charger to charge from\n");
-		start_chargalg_wd = false;
 		break;
 	}
-
-	if (start_chargalg_wd && !delayed_work_pending(&di->chargalg_wd_work))
-		queue_delayed_work(di->chargalg_wq, &di->chargalg_wd_work, 0);
 }
 
 /**
@@ -725,6 +836,9 @@
 		di->batt_data.avg_curr > 0) {
 		if (++di->eoc_cnt >= EOC_COND_CNT) {
 			di->eoc_cnt = 0;
+			if ((di->chg_info.charger_type & USB_CHG) &&
+			   (di->usb_chg->power_path))
+				ab8540_chargalg_usb_pp_en(di, true);
 			di->charge_status = POWER_SUPPLY_STATUS_FULL;
 			di->maintenance_chg = true;
 			dev_dbg(di->dev, "EOC reached!\n");
@@ -1217,6 +1331,8 @@
 static void abx500_chargalg_algorithm(struct abx500_chargalg *di)
 {
 	int charger_status;
+	int ret;
+	int curr_step_lvl;
 
 	/* Collect data from all power_supply class devices */
 	class_for_each_device(power_supply_class, NULL,
@@ -1227,6 +1343,15 @@
 	abx500_chargalg_check_charger_voltage(di);
 
 	charger_status = abx500_chargalg_check_charger_connection(di);
+	abx500_chargalg_check_current_step_status(di);
+
+	if (is_ab8500(di->parent)) {
+		ret = abx500_chargalg_check_charger_enable(di);
+		if (ret < 0)
+			dev_err(di->dev,
+				"Checking charger enable failed: %d\n", ret);
+	}
+
 	/*
 	 * First check if we have a charger connected.
 	 * Also we don't allow charging of unknown batteries if configured
@@ -1416,9 +1541,34 @@
 		break;
 
 	case STATE_NORMAL_INIT:
-		abx500_chargalg_start_charging(di,
-			di->bm->bat_type[di->bm->batt_id].normal_vol_lvl,
-			di->bm->bat_type[di->bm->batt_id].normal_cur_lvl);
+		if ((di->chg_info.charger_type & USB_CHG) &&
+				di->usb_chg->power_path) {
+			if (di->batt_data.volt >
+			    (di->bm->fg_params->lowbat_threshold +
+			     BAT_PLUS_MARGIN)) {
+				ab8540_chargalg_usb_pre_chg_en(di, false);
+				ab8540_chargalg_usb_pp_en(di, false);
+			} else {
+				ab8540_chargalg_usb_pp_en(di, true);
+				ab8540_chargalg_usb_pre_chg_en(di, true);
+				abx500_chargalg_state_to(di,
+					STATE_USB_PP_PRE_CHARGE);
+				break;
+			}
+		}
+
+		if (di->curr_status.curr_step == CHARGALG_CURR_STEP_LOW)
+			abx500_chargalg_stop_charging(di);
+		else {
+			curr_step_lvl = di->bm->bat_type[
+				di->bm->batt_id].normal_cur_lvl
+				* di->curr_status.curr_step
+				/ CHARGALG_CURR_STEP_HIGH;
+			abx500_chargalg_start_charging(di,
+				di->bm->bat_type[di->bm->batt_id]
+				.normal_vol_lvl, curr_step_lvl);
+		}
+
 		abx500_chargalg_state_to(di, STATE_NORMAL);
 		abx500_chargalg_start_safety_timer(di);
 		abx500_chargalg_stop_maintenance_timer(di);
@@ -1430,6 +1580,13 @@
 
 		break;
 
+	case STATE_USB_PP_PRE_CHARGE:
+		if (di->batt_data.volt >
+			(di->bm->fg_params->lowbat_threshold +
+			BAT_PLUS_MARGIN))
+			abx500_chargalg_state_to(di, STATE_NORMAL_INIT);
+		break;
+
 	case STATE_NORMAL:
 		handle_maxim_chg_curr(di);
 		if (di->charge_status == POWER_SUPPLY_STATUS_FULL &&
@@ -1653,99 +1810,134 @@
 
 /* Exposure to the sysfs interface */
 
-/**
- * abx500_chargalg_sysfs_show() - sysfs show operations
- * @kobj:      pointer to the struct kobject
- * @attr:      pointer to the struct attribute
- * @buf:       buffer that holds the parameter to send to userspace
- *
- * Returns a buffer to be displayed in user space
- */
-static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
-					  struct attribute *attr, char *buf)
+static ssize_t abx500_chargalg_curr_step_show(struct abx500_chargalg *di,
+					      char *buf)
 {
-	struct abx500_chargalg *di = container_of(kobj,
-               struct abx500_chargalg, chargalg_kobject);
+	return sprintf(buf, "%d\n", di->curr_status.curr_step);
+}
 
+static ssize_t abx500_chargalg_curr_step_store(struct abx500_chargalg *di,
+					       const char *buf, size_t length)
+{
+	long int param;
+	int ret;
+
+	ret = kstrtol(buf, 10, &param);
+	if (ret < 0)
+		return ret;
+
+	if (param >= CHARGALG_CURR_STEP_LOW &&
+	    param <= CHARGALG_CURR_STEP_HIGH) {
+		di->curr_status.curr_step = param;
+		di->curr_status.curr_step_change = true;
+		queue_work(di->chargalg_wq, &di->chargalg_work);
+	} else
+		dev_info(di->dev, "Wrong current step\n"
+			"Enter 0. Disable AC/USB Charging\n"
+			"1--100. Set AC/USB charging current step\n"
+			"100. Enable AC/USB Charging\n");
+
+	return strlen(buf);
+}
+
+
+static ssize_t abx500_chargalg_en_show(struct abx500_chargalg *di,
+				       char *buf)
+{
 	return sprintf(buf, "%d\n",
 		       di->susp_status.ac_suspended &&
 		       di->susp_status.usb_suspended);
 }
 
-/**
- * abx500_chargalg_sysfs_charger() - sysfs store operations
- * @kobj:      pointer to the struct kobject
- * @attr:      pointer to the struct attribute
- * @buf:       buffer that holds the parameter passed from userspace
- * @length:    length of the parameter passed
- *
- * Returns length of the buffer(input taken from user space) on success
- * else error code on failure
- * The operation to be performed on passing the parameters from the user space.
- */
-static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
-	struct attribute *attr, const char *buf, size_t length)
+static ssize_t abx500_chargalg_en_store(struct abx500_chargalg *di,
+	const char *buf, size_t length)
 {
-	struct abx500_chargalg *di = container_of(kobj,
-		struct abx500_chargalg, chargalg_kobject);
 	long int param;
 	int ac_usb;
 	int ret;
-	char entry = *attr->name;
 
-	switch (entry) {
-	case 'c':
-		ret = strict_strtol(buf, 10, &param);
-		if (ret < 0)
-			return ret;
+	ret = kstrtol(buf, 10, &param);
+	if (ret < 0)
+		return ret;
 
-		ac_usb = param;
-		switch (ac_usb) {
-		case 0:
-			/* Disable charging */
-			di->susp_status.ac_suspended = true;
-			di->susp_status.usb_suspended = true;
-			di->susp_status.suspended_change = true;
-			/* Trigger a state change */
-			queue_work(di->chargalg_wq,
-				&di->chargalg_work);
-			break;
-		case 1:
-			/* Enable AC Charging */
-			di->susp_status.ac_suspended = false;
-			di->susp_status.suspended_change = true;
-			/* Trigger a state change */
-			queue_work(di->chargalg_wq,
-				&di->chargalg_work);
-			break;
-		case 2:
-			/* Enable USB charging */
-			di->susp_status.usb_suspended = false;
-			di->susp_status.suspended_change = true;
-			/* Trigger a state change */
-			queue_work(di->chargalg_wq,
-				&di->chargalg_work);
-			break;
-		default:
-			dev_info(di->dev, "Wrong input\n"
-				"Enter 0. Disable AC/USB Charging\n"
-				"1. Enable AC charging\n"
-				"2. Enable USB Charging\n");
-		};
+	ac_usb = param;
+	switch (ac_usb) {
+	case 0:
+		/* Disable charging */
+		di->susp_status.ac_suspended = true;
+		di->susp_status.usb_suspended = true;
+		di->susp_status.suspended_change = true;
+		/* Trigger a state change */
+		queue_work(di->chargalg_wq,
+			&di->chargalg_work);
 		break;
+	case 1:
+		/* Enable AC Charging */
+		di->susp_status.ac_suspended = false;
+		di->susp_status.suspended_change = true;
+		/* Trigger a state change */
+		queue_work(di->chargalg_wq,
+			&di->chargalg_work);
+		break;
+	case 2:
+		/* Enable USB charging */
+		di->susp_status.usb_suspended = false;
+		di->susp_status.suspended_change = true;
+		/* Trigger a state change */
+		queue_work(di->chargalg_wq,
+			&di->chargalg_work);
+		break;
+	default:
+		dev_info(di->dev, "Wrong input\n"
+			"Enter 0. Disable AC/USB Charging\n"
+			"1. Enable AC charging\n"
+			"2. Enable USB Charging\n");
 	};
 	return strlen(buf);
 }
 
-static struct attribute abx500_chargalg_en_charger = \
+static struct abx500_chargalg_sysfs_entry abx500_chargalg_en_charger =
+	__ATTR(chargalg, 0644, abx500_chargalg_en_show,
+				abx500_chargalg_en_store);
+
+static struct abx500_chargalg_sysfs_entry abx500_chargalg_curr_step =
+	__ATTR(chargalg_curr_step, 0644, abx500_chargalg_curr_step_show,
+					abx500_chargalg_curr_step_store);
+
+static ssize_t abx500_chargalg_sysfs_show(struct kobject *kobj,
+	struct attribute *attr, char *buf)
 {
-	.name = "chargalg",
-	.mode = S_IRUGO | S_IWUSR,
-};
+	struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
+		struct abx500_chargalg_sysfs_entry, attr);
+
+	struct abx500_chargalg *di = container_of(kobj,
+		struct abx500_chargalg, chargalg_kobject);
+
+	if (!entry->show)
+		return -EIO;
+
+	return entry->show(di, buf);
+}
+
+static ssize_t abx500_chargalg_sysfs_charger(struct kobject *kobj,
+	struct attribute *attr, const char *buf, size_t length)
+{
+	struct abx500_chargalg_sysfs_entry *entry = container_of(attr,
+		struct abx500_chargalg_sysfs_entry, attr);
+
+	struct abx500_chargalg *di = container_of(kobj,
+		struct abx500_chargalg, chargalg_kobject);
+
+	if (!entry->store)
+		return -EIO;
+
+	return entry->store(di, buf, length);
+}
 
 static struct attribute *abx500_chargalg_chg[] = {
-	&abx500_chargalg_en_charger,
-	NULL
+	&abx500_chargalg_en_charger.attr,
+	&abx500_chargalg_curr_step.attr,
+	NULL,
 };
 
 static const struct sysfs_ops abx500_chargalg_sysfs_ops = {
@@ -1832,10 +2024,16 @@
 	/* sysfs interface to enable/disbale charging from user space */
 	abx500_chargalg_sysfs_exit(di);
 
+	hrtimer_cancel(&di->safety_timer);
+	hrtimer_cancel(&di->maintenance_timer);
+
+	cancel_delayed_work_sync(&di->chargalg_periodic_work);
+	cancel_delayed_work_sync(&di->chargalg_wd_work);
+	cancel_work_sync(&di->chargalg_work);
+
 	/* Delete the work queue */
 	destroy_workqueue(di->chargalg_wq);
 
-	flush_scheduled_work();
 	power_supply_unregister(&di->chargalg_psy);
 	platform_set_drvdata(pdev, NULL);
 
@@ -1873,8 +2071,9 @@
 		}
 	}
 
-	/* get device struct */
+	/* get device struct and parent */
 	di->dev = &pdev->dev;
+	di->parent = dev_get_drvdata(pdev->dev.parent);
 
 	/* chargalg supply */
 	di->chargalg_psy.name = "abx500_chargalg";
@@ -1888,15 +2087,13 @@
 		abx500_chargalg_external_power_changed;
 
 	/* Initilialize safety timer */
-	init_timer(&di->safety_timer);
+	hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	di->safety_timer.function = abx500_chargalg_safety_timer_expired;
-	di->safety_timer.data = (unsigned long) di;
 
 	/* Initilialize maintenance timer */
-	init_timer(&di->maintenance_timer);
+	hrtimer_init(&di->maintenance_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
 	di->maintenance_timer.function =
 		abx500_chargalg_maintenance_timer_expired;
-	di->maintenance_timer.data = (unsigned long) di;
 
 	/* Create a work queue for the chargalg */
 	di->chargalg_wq =
@@ -1933,6 +2130,7 @@
 		dev_err(di->dev, "failed to create sysfs entry\n");
 		goto free_psy;
 	}
+	di->curr_status.curr_step = CHARGALG_CURR_STEP_HIGH;
 
 	/* Run the charging algorithm */
 	queue_delayed_work(di->chargalg_wq, &di->chargalg_periodic_work, 0);
@@ -1964,18 +2162,7 @@
 	},
 };
 
-static int __init abx500_chargalg_init(void)
-{
-	return platform_driver_register(&abx500_chargalg_driver);
-}
-
-static void __exit abx500_chargalg_exit(void)
-{
-	platform_driver_unregister(&abx500_chargalg_driver);
-}
-
-module_init(abx500_chargalg_init);
-module_exit(abx500_chargalg_exit);
+module_platform_driver(abx500_chargalg_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Johan Palsson, Karl Komierowski");
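
The bulk of the abx500_chargalg changes above convert the two struct timer_list timers to hrtimers with an expiry range, so the hour-scale timeouts can absorb up to five minutes of slack instead of being rounded to jiffies. A minimal sketch of that pattern, assuming an illustrative example_charger structure rather than the driver's real layout:

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct example_charger {
	struct hrtimer safety_timer;
	bool safety_timer_expired;
};

static enum hrtimer_restart example_timer_expired(struct hrtimer *timer)
{
	struct example_charger *di =
		container_of(timer, struct example_charger, safety_timer);

	/* only flag the event; real work is deferred to a workqueue */
	di->safety_timer_expired = true;
	return HRTIMER_NORESTART;
}

static void example_start_safety_timer(struct example_charger *di, int hours)
{
	/* expire after 'hours', allowing up to five minutes of slack */
	hrtimer_set_expires_range(&di->safety_timer,
				  ktime_set(hours * 3600, 0),
				  ktime_set(5 * 60, 0));
	hrtimer_start_expires(&di->safety_timer, HRTIMER_MODE_REL);
}

static void example_init_timer(struct example_charger *di)
{
	hrtimer_init(&di->safety_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	di->safety_timer.function = example_timer_expired;
}
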
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 8acc3f8..fefc39f 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -1485,13 +1485,12 @@
 
 	/* Basic Values. Unspecified are Null or 0 */
 	cm->dev = &pdev->dev;
-	cm->desc = kzalloc(sizeof(struct charger_desc), GFP_KERNEL);
+	cm->desc = kmemdup(desc, sizeof(struct charger_desc), GFP_KERNEL);
 	if (!cm->desc) {
 		dev_err(&pdev->dev, "Cannot allocate memory.\n");
 		ret = -ENOMEM;
 		goto err_alloc_desc;
 	}
-	memcpy(cm->desc, desc, sizeof(struct charger_desc));
 	cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
 
 	/*
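
The charger-manager hunk above replaces a kzalloc()+memcpy() pair with kmemdup(), which allocates and copies in one call and returns NULL on failure. A minimal sketch, reusing the real struct charger_desc type purely for illustration:

#include <linux/slab.h>
#include <linux/string.h>

static struct charger_desc *example_dup_desc(const struct charger_desc *desc)
{
	/* kmemdup() == kmalloc() + memcpy(); the caller checks for NULL */
	return kmemdup(desc, sizeof(*desc), GFP_KERNEL);
}
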
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index e8c5a39..ae6c418 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -505,7 +505,7 @@
 	    pdata->charge_millivolt > 4350)
 		return -EINVAL;
 
-	charger = kzalloc(sizeof(*charger), GFP_KERNEL);
+	charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
 	if (charger == NULL)
 		return -ENOMEM;
 
@@ -557,8 +557,6 @@
 	cancel_delayed_work(&charger->work);
 
 err_charger_init:
-	kfree(charger);
-
 	return ret;
 }
 
@@ -575,8 +573,6 @@
 	da9030_set_charge(charger, 0);
 	power_supply_unregister(&charger->psy);
 
-	kfree(charger);
-
 	return 0;
 }
 
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
index 08193fe..f8f4c0f 100644
--- a/drivers/power/da9052-battery.c
+++ b/drivers/power/da9052-battery.c
@@ -594,7 +594,8 @@
 	int ret;
 	int i;
 
-	bat = kzalloc(sizeof(struct da9052_battery), GFP_KERNEL);
+	bat = devm_kzalloc(&pdev->dev, sizeof(struct da9052_battery),
+				GFP_KERNEL);
 	if (!bat)
 		return -ENOMEM;
 
@@ -635,7 +636,6 @@
 	while (--i >= 0)
 		da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
 
-	kfree(bat);
 	return ret;
 }
 static int da9052_bat_remove(struct platform_device *pdev)
@@ -647,7 +647,6 @@
 		da9052_free_irq(bat->da9052, da9052_bat_irq_bits[i], bat);
 
 	power_supply_unregister(&bat->psy);
-	kfree(bat);
 
 	return 0;
 }
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 704e652..85b4e6e 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -512,7 +512,7 @@
 	int retval = 0;
 	struct ds2760_device_info *di;
 
-	di = kzalloc(sizeof(*di), GFP_KERNEL);
+	di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
 	if (!di) {
 		retval = -ENOMEM;
 		goto di_alloc_failed;
@@ -576,7 +576,6 @@
 workqueue_failed:
 	power_supply_unregister(&di->bat);
 batt_failed:
-	kfree(di);
 di_alloc_failed:
 success:
 	return retval;
@@ -590,7 +589,6 @@
 	cancel_delayed_work_sync(&di->set_charged_work);
 	destroy_workqueue(di->monitor_wqueue);
 	power_supply_unregister(&di->bat);
-	kfree(di);
 
 	return 0;
 }
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
index 8b6c453..9f418fa 100644
--- a/drivers/power/ds2780_battery.c
+++ b/drivers/power/ds2780_battery.c
@@ -760,7 +760,7 @@
 	int ret = 0;
 	struct ds2780_device_info *dev_info;
 
-	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+	dev_info = devm_kzalloc(&pdev->dev, sizeof(*dev_info), GFP_KERNEL);
 	if (!dev_info) {
 		ret = -ENOMEM;
 		goto fail;
@@ -779,7 +779,7 @@
 	ret = power_supply_register(&pdev->dev, &dev_info->bat);
 	if (ret) {
 		dev_err(dev_info->dev, "failed to register battery\n");
-		goto fail_free_info;
+		goto fail;
 	}
 
 	ret = sysfs_create_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
@@ -813,8 +813,6 @@
 	sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
 fail_unregister:
 	power_supply_unregister(&dev_info->bat);
-fail_free_info:
-	kfree(dev_info);
 fail:
 	return ret;
 }
@@ -828,7 +826,6 @@
 
 	power_supply_unregister(&dev_info->bat);
 
-	kfree(dev_info);
 	return 0;
 }
 
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index c09e772..5631748 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -332,32 +332,32 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 
-static int ds278x_suspend(struct i2c_client *client,
-		pm_message_t state)
+static int ds278x_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ds278x_info *info = i2c_get_clientdata(client);
 
 	cancel_delayed_work(&info->bat_work);
 	return 0;
 }
 
-static int ds278x_resume(struct i2c_client *client)
+static int ds278x_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ds278x_info *info = i2c_get_clientdata(client);
 
 	schedule_delayed_work(&info->bat_work, DS278x_DELAY);
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(ds278x_battery_pm_ops, ds278x_suspend, ds278x_resume);
+#define DS278X_BATTERY_PM_OPS (&ds278x_battery_pm_ops)
+
 #else
-
-#define ds278x_suspend NULL
-#define ds278x_resume NULL
-
-#endif /* CONFIG_PM */
-
+#define DS278X_BATTERY_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
 
 enum ds278x_num_id {
 	DS2782 = 0,
@@ -460,11 +460,10 @@
 static struct i2c_driver ds278x_battery_driver = {
 	.driver 	= {
 		.name	= "ds2782-battery",
+		.pm	= DS278X_BATTERY_PM_OPS,
 	},
 	.probe		= ds278x_battery_probe,
 	.remove		= ds278x_battery_remove,
-	.suspend	= ds278x_suspend,
-	.resume		= ds278x_resume,
 	.id_table	= ds278x_id,
 };
 module_i2c_driver(ds278x_battery_driver);
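
The ds2782 hunk above and the max17040 hunk further down apply the same conversion from the legacy I2C suspend/resume callbacks to dev_pm_ops via SIMPLE_DEV_PM_OPS(). Condensed, the pattern amounts to the following sketch; example_chip, its delayed work and the one-second resume delay are assumptions, not taken from either driver:

#include <linux/i2c.h>
#include <linux/pm.h>
#include <linux/workqueue.h>

struct example_chip {
	struct delayed_work work;
};

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	struct example_chip *chip = i2c_get_clientdata(to_i2c_client(dev));

	cancel_delayed_work(&chip->work);
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_chip *chip = i2c_get_clientdata(to_i2c_client(dev));

	schedule_delayed_work(&chip->work, msecs_to_jiffies(1000));
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS (&example_pm_ops)
#else
#define EXAMPLE_PM_OPS NULL
#endif

static struct i2c_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= EXAMPLE_PM_OPS,	/* replaces .suspend/.resume */
	},
};
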
diff --git a/drivers/power/goldfish_battery.c b/drivers/power/goldfish_battery.c
index c10f460..29eba88 100644
--- a/drivers/power/goldfish_battery.c
+++ b/drivers/power/goldfish_battery.c
@@ -178,7 +178,7 @@
 		return -ENODEV;
 	}
 
-	data->reg_base = devm_ioremap(&pdev->dev, r->start, r->end - r->start + 1);
+	data->reg_base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
 	if (data->reg_base == NULL) {
 		dev_err(&pdev->dev, "unable to remap MMIO\n");
 		return -ENOMEM;
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index e3e40a9..e9883eee 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -86,7 +86,8 @@
 		return -EINVAL;
 	}
 
-	gpio_charger = kzalloc(sizeof(*gpio_charger), GFP_KERNEL);
+	gpio_charger = devm_kzalloc(&pdev->dev, sizeof(*gpio_charger),
+					GFP_KERNEL);
 	if (!gpio_charger) {
 		dev_err(&pdev->dev, "Failed to alloc driver structure\n");
 		return -ENOMEM;
@@ -140,7 +141,6 @@
 err_gpio_free:
 	gpio_free(pdata->gpio);
 err_free:
-	kfree(gpio_charger);
 	return ret;
 }
 
@@ -156,7 +156,6 @@
 	gpio_free(gpio_charger->pdata->gpio);
 
 	platform_set_drvdata(pdev, NULL);
-	kfree(gpio_charger);
 
 	return 0;
 }
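
The allocation changes in gpio-charger above and in most of the drivers that follow are the same devm_kzalloc() conversion: the buffer is tied to the device's lifetime, so the kfree() calls disappear from the error and remove paths. A minimal probe() sketch with an illustrative example_data type (not from any of these drivers):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_data {
	int value;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_data *data;

	/* freed automatically when the device is unbound */
	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);
	return 0;	/* no kfree() needed in any error path */
}
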
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 176ad59..fc04d19 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -411,7 +411,7 @@
 	struct isp1704_charger	*isp;
 	int			ret = -ENODEV;
 
-	isp = kzalloc(sizeof *isp, GFP_KERNEL);
+	isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
 	if (!isp)
 		return -ENOMEM;
 
@@ -477,8 +477,6 @@
 	isp1704_charger_set_power(isp, 0);
 	usb_put_phy(isp->phy);
 fail0:
-	kfree(isp);
-
 	dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
 
 	return ret;
@@ -492,7 +490,6 @@
 	power_supply_unregister(&isp->psy);
 	usb_put_phy(isp->phy);
 	isp1704_charger_set_power(isp, 0);
-	kfree(isp);
 
 	return 0;
 }
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index 6d1f452..ed49b50 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -49,7 +49,6 @@
 #define LP8788_CHG_START		0x11
 #define LP8788_CHG_END			0x1C
 
-#define LP8788_BUF_SIZE			40
 #define LP8788_ISEL_MAX			23
 #define LP8788_ISEL_STEP		50
 #define LP8788_VTERM_MIN		4100
@@ -633,7 +632,7 @@
 	lp8788_read_byte(pchg->lp, LP8788_CHG_STATUS, &data);
 	state = (data & LP8788_CHG_STATE_M) >> LP8788_CHG_STATE_S;
 
-	return scnprintf(buf, LP8788_BUF_SIZE, "%s\n", desc[state]);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", desc[state]);
 }
 
 static ssize_t lp8788_show_eoc_time(struct device *dev,
@@ -647,7 +646,7 @@
 	lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val);
 	val = (val & LP8788_CHG_EOC_TIME_M) >> LP8788_CHG_EOC_TIME_S;
 
-	return scnprintf(buf, LP8788_BUF_SIZE, "End Of Charge Time: %s\n",
+	return scnprintf(buf, PAGE_SIZE, "End Of Charge Time: %s\n",
 			stime[val]);
 }
 
@@ -667,8 +666,7 @@
 	val = (val & LP8788_CHG_EOC_LEVEL_M) >> LP8788_CHG_EOC_LEVEL_S;
 	level = mode ? abs_level[val] : relative_level[val];
 
-	return scnprintf(buf, LP8788_BUF_SIZE, "End Of Charge Level: %s\n",
-			level);
+	return scnprintf(buf, PAGE_SIZE, "End Of Charge Level: %s\n", level);
 }
 
 static DEVICE_ATTR(charger_status, S_IRUSR, lp8788_show_charger_status, NULL);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index 74a0bd9..c7ff6d6 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -246,31 +246,34 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 
-static int max17040_suspend(struct i2c_client *client,
-		pm_message_t state)
+static int max17040_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct max17040_chip *chip = i2c_get_clientdata(client);
 
 	cancel_delayed_work(&chip->work);
 	return 0;
 }
 
-static int max17040_resume(struct i2c_client *client)
+static int max17040_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct max17040_chip *chip = i2c_get_clientdata(client);
 
 	schedule_delayed_work(&chip->work, MAX17040_DELAY);
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(max17040_pm_ops, max17040_suspend, max17040_resume);
+#define MAX17040_PM_OPS (&max17040_pm_ops)
+
 #else
 
-#define max17040_suspend NULL
-#define max17040_resume NULL
+#define MAX17040_PM_OPS NULL
 
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct i2c_device_id max17040_id[] = {
 	{ "max17040", 0 },
@@ -281,11 +284,10 @@
 static struct i2c_driver max17040_i2c_driver = {
 	.driver	= {
 		.name	= "max17040",
+		.pm	= MAX17040_PM_OPS,
 	},
 	.probe		= max17040_probe,
 	.remove		= max17040_remove,
-	.suspend	= max17040_suspend,
-	.resume		= max17040_resume,
 	.id_table	= max17040_id,
 };
 module_i2c_driver(max17040_i2c_driver);
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 14e2b96..08f0d79 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -189,7 +189,7 @@
 	int ta_in = 0;
 	int usb_in = 0;
 
-	data = kzalloc(sizeof(struct max8903_data), GFP_KERNEL);
+	data = devm_kzalloc(dev, sizeof(struct max8903_data), GFP_KERNEL);
 	if (data == NULL) {
 		dev_err(dev, "Cannot allocate memory.\n");
 		return -ENOMEM;
@@ -341,7 +341,6 @@
 err_psy:
 	power_supply_unregister(&data->psy);
 err:
-	kfree(data);
 	return ret;
 }
 
@@ -359,7 +358,6 @@
 		if (pdata->dc_valid)
 			free_irq(gpio_to_irq(pdata->dok), data);
 		power_supply_unregister(&data->psy);
-		kfree(data);
 	}
 
 	return 0;
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 665cdc7..0ee1e14 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -489,7 +489,8 @@
 		return -EINVAL;
 	}
 
-	info = kzalloc(sizeof(struct max8925_power_info), GFP_KERNEL);
+	info = devm_kzalloc(&pdev->dev, sizeof(struct max8925_power_info),
+				GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
 	info->chip = chip;
@@ -546,7 +547,6 @@
 out_usb:
 	power_supply_unregister(&info->ac);
 out:
-	kfree(info);
 	return ret;
 }
 
@@ -559,7 +559,6 @@
 		power_supply_unregister(&info->usb);
 		power_supply_unregister(&info->battery);
 		max8925_deinit_charger(info);
-		kfree(info);
 	}
 	return 0;
 }
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
index e757885..4bdedfe 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/max8997_charger.c
@@ -138,7 +138,8 @@
 		return ret;
 	}
 
-	charger = kzalloc(sizeof(struct charger_data), GFP_KERNEL);
+	charger = devm_kzalloc(&pdev->dev, sizeof(struct charger_data),
+				GFP_KERNEL);
 	if (charger == NULL) {
 		dev_err(&pdev->dev, "Cannot allocate memory.\n");
 		return -ENOMEM;
@@ -158,13 +159,10 @@
 	ret = power_supply_register(&pdev->dev, &charger->battery);
 	if (ret) {
 		dev_err(&pdev->dev, "failed: power supply register\n");
-		goto err;
+		return ret;
 	}
 
 	return 0;
-err:
-	kfree(charger);
-	return ret;
 }
 
 static int max8997_battery_remove(struct platform_device *pdev)
@@ -172,7 +170,6 @@
 	struct charger_data *charger = platform_get_drvdata(pdev);
 
 	power_supply_unregister(&charger->battery);
-	kfree(charger);
 	return 0;
 }
 
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index bf677e3..5017470 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -88,7 +88,8 @@
 		return -ENODEV;
 	}
 
-	max8998 = kzalloc(sizeof(struct max8998_battery_data), GFP_KERNEL);
+	max8998 = devm_kzalloc(&pdev->dev, sizeof(struct max8998_battery_data),
+				GFP_KERNEL);
 	if (!max8998)
 		return -ENOMEM;
 
@@ -174,7 +175,6 @@
 
 	return 0;
 err:
-	kfree(max8998);
 	return ret;
 }
 
@@ -183,7 +183,6 @@
 	struct max8998_battery_data *max8998 = platform_get_drvdata(pdev);
 
 	power_supply_unregister(&max8998->battery);
-	kfree(max8998);
 
 	return 0;
 }
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index c2122a7..17fd77f 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -373,7 +373,7 @@
 	int i;
 	u8 mbcs1;
 
-	mbc = kzalloc(sizeof(*mbc), GFP_KERNEL);
+	mbc = devm_kzalloc(&pdev->dev, sizeof(*mbc), GFP_KERNEL);
 	if (!mbc)
 		return -ENOMEM;
 
@@ -413,7 +413,6 @@
 	ret = power_supply_register(&pdev->dev, &mbc->adapter);
 	if (ret) {
 		dev_err(mbc->pcf->dev, "failed to register adapter\n");
-		kfree(mbc);
 		return ret;
 	}
 
@@ -421,7 +420,6 @@
 	if (ret) {
 		dev_err(mbc->pcf->dev, "failed to register usb\n");
 		power_supply_unregister(&mbc->adapter);
-		kfree(mbc);
 		return ret;
 	}
 
@@ -430,7 +428,6 @@
 		dev_err(mbc->pcf->dev, "failed to register ac\n");
 		power_supply_unregister(&mbc->adapter);
 		power_supply_unregister(&mbc->usb);
-		kfree(mbc);
 		return ret;
 	}
 
@@ -461,8 +458,6 @@
 	power_supply_unregister(&mbc->adapter);
 	power_supply_unregister(&mbc->ac);
 
-	kfree(mbc);
-
 	return 0;
 }
 
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index ee346d4..a441751 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -16,24 +16,25 @@
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
-#include <linux/completion.h>
 #include <linux/regulator/consumer.h>
 #include <linux/err.h>
 #include <linux/i2c.h>
 #include <linux/workqueue.h>
-#include <linux/kobject.h>
-#include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/mfd/abx500/ab8500-bm.h>
-#include <linux/mfd/abx500/ab8500-gpadc.h>
 #include <linux/mfd/abx500/ux500_chargalg.h>
 #include <linux/pm2301_charger.h>
 #include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
 
 #include "pm2301_charger.h"
 
 #define to_pm2xxx_charger_ac_device_info(x) container_of((x), \
 		struct pm2xxx_charger, ac_chg)
+#define SLEEP_MIN		50
+#define SLEEP_MAX		100
+#define PM2XXX_AUTOSUSPEND_DELAY 500
 
 static int pm2xxx_interrupt_registers[] = {
 	PM2XXX_REG_INT1,
@@ -113,33 +114,24 @@
 
 static void set_lpn_pin(struct pm2xxx_charger *pm2)
 {
-	if (pm2->ac.charger_connected)
-		return;
-	gpio_set_value(pm2->lpn_pin, 1);
-
-	return;
+	if (!pm2->ac.charger_connected && gpio_is_valid(pm2->lpn_pin)) {
+		gpio_set_value(pm2->lpn_pin, 1);
+		usleep_range(SLEEP_MIN, SLEEP_MAX);
+	}
 }
 
 static void clear_lpn_pin(struct pm2xxx_charger *pm2)
 {
-	if (pm2->ac.charger_connected)
-		return;
-	gpio_set_value(pm2->lpn_pin, 0);
-
-	return;
+	if (!pm2->ac.charger_connected && gpio_is_valid(pm2->lpn_pin))
+		gpio_set_value(pm2->lpn_pin, 0);
 }
 
 static int pm2xxx_reg_read(struct pm2xxx_charger *pm2, int reg, u8 *val)
 {
 	int ret;
-	/*
-	 * When AC adaptor is unplugged, the host
-	 * must put LPN high to be able to
-	 * communicate by I2C with PM2301
-	 * and receive I2C "acknowledge" from PM2301.
-	 */
-	mutex_lock(&pm2->lock);
-	set_lpn_pin(pm2);
+
+	/* wake up the device */
+	pm_runtime_get_sync(pm2->dev);
 
 	ret = i2c_smbus_read_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
 				1, val);
@@ -147,8 +139,8 @@
 		dev_err(pm2->dev, "Error reading register at 0x%x\n", reg);
 	else
 		ret = 0;
-	clear_lpn_pin(pm2);
-	mutex_unlock(&pm2->lock);
+
+	pm_runtime_put_sync(pm2->dev);
 
 	return ret;
 }
@@ -156,14 +148,9 @@
 static int pm2xxx_reg_write(struct pm2xxx_charger *pm2, int reg, u8 val)
 {
 	int ret;
-	/*
-	 * When AC adaptor is unplugged, the host
-	 * must put LPN high to be able to
-	 * communicate by I2C with PM2301
-	 * and receive I2C "acknowledge" from PM2301.
-	 */
-	mutex_lock(&pm2->lock);
-	set_lpn_pin(pm2);
+
+	/* wake up the device */
+	pm_runtime_get_sync(pm2->dev);
 
 	ret = i2c_smbus_write_i2c_block_data(pm2->config.pm2xxx_i2c, reg,
 				1, &val);
@@ -171,8 +158,8 @@
 		dev_err(pm2->dev, "Error writing register at 0x%x\n", reg);
 	else
 		ret = 0;
-	clear_lpn_pin(pm2);
-	mutex_unlock(&pm2->lock);
+
+	pm_runtime_put_sync(pm2->dev);
 
 	return ret;
 }
@@ -192,11 +179,22 @@
 {
 	int ret;
 
+	/* Disable SW EOC ctrl */
+	ret = pm2xxx_reg_write(pm2, PM2XXX_SW_CTRL_REG, PM2XXX_SWCTRL_HW);
+	if (ret < 0) {
+		dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+		return ret;
+	}
+
 	/* Disable charging */
 	ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_CTRL_REG2,
 			(PM2XXX_CH_AUTO_RESUME_DIS | PM2XXX_CHARGER_DIS));
+	if (ret < 0) {
+		dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+		return ret;
+	}
 
-	return ret;
+	return 0;
 }
 
 static int pm2xxx_charger_batt_therm_mngt(struct pm2xxx_charger *pm2, int val)
@@ -216,21 +214,14 @@
 
 static int pm2xxx_charger_ovv_mngt(struct pm2xxx_charger *pm2, int val)
 {
-	int ret = 0;
+	dev_err(pm2->dev, "Overvoltage detected\n");
+	pm2->flags.ovv = true;
+	power_supply_changed(&pm2->ac_chg.psy);
 
-	pm2->failure_input_ovv++;
-	if (pm2->failure_input_ovv < 4) {
-		ret = pm2xxx_charging_enable_mngt(pm2);
-		goto out;
-	} else {
-		pm2->failure_input_ovv = 0;
-		dev_err(pm2->dev, "Overvoltage detected\n");
-		pm2->flags.ovv = true;
-		power_supply_changed(&pm2->ac_chg.psy);
-	}
+	/* Schedule a new HW failure check */
+	queue_delayed_work(pm2->charger_wq, &pm2->check_hw_failure_work, 0);
 
-out:
-	return ret;
+	return 0;
 }
 
 static int pm2xxx_charger_wd_exp_mngt(struct pm2xxx_charger *pm2, int val)
@@ -245,13 +236,29 @@
 
 static int pm2xxx_charger_vbat_lsig_mngt(struct pm2xxx_charger *pm2, int val)
 {
+	int ret;
+
 	switch (val) {
 	case PM2XXX_INT1_ITVBATLOWR:
 		dev_dbg(pm2->dev, "VBAT grows above VBAT_LOW level\n");
+		/* Enable SW EOC ctrl */
+		ret = pm2xxx_reg_write(pm2, PM2XXX_SW_CTRL_REG,
+							PM2XXX_SWCTRL_SW);
+		if (ret < 0) {
+			dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+			return ret;
+		}
 		break;
 
 	case PM2XXX_INT1_ITVBATLOWF:
 		dev_dbg(pm2->dev, "VBAT drops below VBAT_LOW level\n");
+		/* Disable SW EOC ctrl */
+		ret = pm2xxx_reg_write(pm2, PM2XXX_SW_CTRL_REG,
+							PM2XXX_SWCTRL_HW);
+		if (ret < 0) {
+			dev_err(pm2->dev, "%s pm2xxx write failed\n", __func__);
+			return ret;
+		}
 		break;
 
 	default:
@@ -322,16 +329,27 @@
 	struct pm2xxx_charger *pm2 = pm2_data;
 	int ret = 0;
 
-	if (val & (PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF)) {
-		ret = pm2xxx_charger_vbat_lsig_mngt(pm2, val &
-			(PM2XXX_INT1_ITVBATLOWR | PM2XXX_INT1_ITVBATLOWF));
+	if (val & PM2XXX_INT1_ITVBATLOWR) {
+		ret = pm2xxx_charger_vbat_lsig_mngt(pm2,
+						PM2XXX_INT1_ITVBATLOWR);
+		if (ret < 0)
+			goto out;
+	}
+
+	if (val & PM2XXX_INT1_ITVBATLOWF) {
+		ret = pm2xxx_charger_vbat_lsig_mngt(pm2,
+						PM2XXX_INT1_ITVBATLOWF);
+		if (ret < 0)
+			goto out;
 	}
 
 	if (val & PM2XXX_INT1_ITVBATDISCONNECT) {
 		ret = pm2xxx_charger_bat_disc_mngt(pm2,
 				PM2XXX_INT1_ITVBATDISCONNECT);
+		if (ret < 0)
+			goto out;
 	}
-
+out:
 	return ret;
 }
 
@@ -447,7 +465,6 @@
 	struct pm2xxx_charger *pm2 = pm2_data;
 	int ret = 0;
 
-
 	if (val & (PM2XXX_INT6_ITVPWR2DROP | PM2XXX_INT6_ITVPWR1DROP)) {
 		dev_dbg(pm2->dev, "VMPWR drop to VBAT level\n");
 	}
@@ -468,14 +485,22 @@
 	struct pm2xxx_interrupts *interrupt = pm2->pm2_int;
 	int i;
 
-	for (i = 0; i < PM2XXX_NUM_INT_REG; i++) {
-		 pm2xxx_reg_read(pm2,
+	/* wake up the device */
+	pm_runtime_get_sync(pm2->dev);
+
+	do {
+		for (i = 0; i < PM2XXX_NUM_INT_REG; i++) {
+			pm2xxx_reg_read(pm2,
 				pm2xxx_interrupt_registers[i],
 				&(interrupt->reg[i]));
 
-		if (interrupt->reg[i] > 0)
-			interrupt->handler[i](pm2, interrupt->reg[i]);
-	}
+			if (interrupt->reg[i] > 0)
+				interrupt->handler[i](pm2, interrupt->reg[i]);
+		}
+	} while (gpio_get_value(pm2->pdata->gpio_irq_number) == 0);
+
+	pm_runtime_mark_last_busy(pm2->dev);
+	pm_runtime_put_autosuspend(pm2->dev);
 
 	return IRQ_HANDLED;
 }
@@ -592,6 +617,8 @@
 			val->intval = POWER_SUPPLY_HEALTH_DEAD;
 		else if (pm2->flags.main_thermal_prot)
 			val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+		else if (pm2->flags.ovv)
+			val->intval = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
 		else
 			val->intval = POWER_SUPPLY_HEALTH_GOOD;
 		break;
@@ -674,10 +701,6 @@
 	ret = pm2xxx_reg_write(pm2, PM2XXX_BATT_LOW_LEV_COMP_REG,
 		PM2XXX_VBAT_LOW_MONITORING_ENA);
 
-	/* Disable LED */
-	ret = pm2xxx_reg_write(pm2, PM2XXX_LED_CTRL_REG,
-		PM2XXX_LED_SELECT_DIS);
-
 	return ret;
 }
 
@@ -822,10 +845,54 @@
 	sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
 };
 
+static void pm2xxx_charger_check_hw_failure_work(struct work_struct *work)
+{
+	u8 reg_value;
+
+	struct pm2xxx_charger *pm2 = container_of(work,
+		struct pm2xxx_charger, check_hw_failure_work.work);
+
+	if (pm2->flags.ovv) {
+		pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT4, &reg_value);
+
+		if (!(reg_value & (PM2XXX_INT4_S_ITVPWR1OVV |
+					PM2XXX_INT4_S_ITVPWR2OVV))) {
+			pm2->flags.ovv = false;
+			power_supply_changed(&pm2->ac_chg.psy);
+		}
+	}
+
+	/* If we still have a failure, schedule a new check */
+	if (pm2->flags.ovv) {
+		queue_delayed_work(pm2->charger_wq,
+			&pm2->check_hw_failure_work, round_jiffies(HZ));
+	}
+}
+
 static void pm2xxx_charger_check_main_thermal_prot_work(
 	struct work_struct *work)
 {
-};
+	int ret;
+	u8 val;
+
+	struct pm2xxx_charger *pm2 = container_of(work, struct pm2xxx_charger,
+					check_main_thermal_prot_work);
+
+	/* Check if die temp warning is still active */
+	ret = pm2xxx_reg_read(pm2, PM2XXX_SRCE_REG_INT5, &val);
+	if (ret < 0) {
+		dev_err(pm2->dev, "%s pm2xxx read failed\n", __func__);
+		return;
+	}
+	if (val & (PM2XXX_INT5_S_ITTHERMALWARNINGRISE
+			| PM2XXX_INT5_S_ITTHERMALSHUTDOWNRISE))
+		pm2->flags.main_thermal_prot = true;
+	else if (val & (PM2XXX_INT5_S_ITTHERMALWARNINGFALL
+				| PM2XXX_INT5_S_ITTHERMALSHUTDOWNFALL))
+		pm2->flags.main_thermal_prot = false;
+
+	power_supply_changed(&pm2->ac_chg.psy);
+}
 
 static struct pm2xxx_interrupts pm2xxx_int = {
 	.handler[0] = pm2_int_reg0,
@@ -840,24 +907,105 @@
 	{"PM2XXX_IRQ_INT", pm2xxx_irq_int},
 };
 
-static int pm2xxx_wall_charger_resume(struct i2c_client *i2c_client)
+#ifdef CONFIG_PM
+
+#ifdef CONFIG_PM_SLEEP
+
+static int pm2xxx_wall_charger_resume(struct device *dev)
 {
+	struct i2c_client *i2c_client = to_i2c_client(dev);
+	struct pm2xxx_charger *pm2;
+
+	pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(i2c_client);
+	set_lpn_pin(pm2);
+
+	/* If we still have a HW failure, schedule a new check */
+	if (pm2->flags.ovv)
+		queue_delayed_work(pm2->charger_wq,
+				&pm2->check_hw_failure_work, 0);
+
 	return 0;
 }
 
-static int pm2xxx_wall_charger_suspend(struct i2c_client *i2c_client,
-	pm_message_t state)
+static int pm2xxx_wall_charger_suspend(struct device *dev)
 {
+	struct i2c_client *i2c_client = to_i2c_client(dev);
+	struct pm2xxx_charger *pm2;
+
+	pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(i2c_client);
+	clear_lpn_pin(pm2);
+
+	/* Cancel any pending HW failure check */
+	if (delayed_work_pending(&pm2->check_hw_failure_work))
+		cancel_delayed_work(&pm2->check_hw_failure_work);
+
+	flush_work(&pm2->ac_work);
+	flush_work(&pm2->check_main_thermal_prot_work);
+
 	return 0;
 }
 
-static int __devinit pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+
+static int pm2xxx_runtime_suspend(struct device *dev)
+{
+	struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
+	struct pm2xxx_charger *pm2;
+	int ret = 0;
+
+	pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client);
+	if (!pm2) {
+		dev_err(dev, "no pm2xxx_charger data supplied\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	clear_lpn_pin(pm2);
+
+	return ret;
+}
+
+static int pm2xxx_runtime_resume(struct device *dev)
+{
+	struct i2c_client *pm2xxx_i2c_client = to_i2c_client(dev);
+	struct pm2xxx_charger *pm2;
+	int ret = 0;
+
+	pm2 = (struct pm2xxx_charger *)i2c_get_clientdata(pm2xxx_i2c_client);
+	if (!pm2) {
+		dev_err(dev, "no pm2xxx_charger data supplied\n");
+		ret = -EINVAL;
+		return ret;
+	}
+
+	if (gpio_is_valid(pm2->lpn_pin) && gpio_get_value(pm2->lpn_pin) == 0)
+		set_lpn_pin(pm2);
+
+	return ret;
+}
+
+#endif
+
+static const struct dev_pm_ops pm2xxx_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm2xxx_wall_charger_suspend,
+		pm2xxx_wall_charger_resume)
+	SET_RUNTIME_PM_OPS(pm2xxx_runtime_suspend, pm2xxx_runtime_resume, NULL)
+};
+#define  PM2XXX_PM_OPS (&pm2xxx_pm_ops)
+#else
+#define  PM2XXX_PM_OPS  NULL
+#endif
+
+static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
 		const struct i2c_device_id *id)
 {
 	struct pm2xxx_platform_data *pl_data = i2c_client->dev.platform_data;
 	struct pm2xxx_charger *pm2;
 	int ret = 0;
 	u8 val;
+	int i;
 
 	pm2 = kzalloc(sizeof(struct pm2xxx_charger), GFP_KERNEL);
 	if (!pm2) {
@@ -867,7 +1015,6 @@
 
 	/* get parent data */
 	pm2->dev = &i2c_client->dev;
-	pm2->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
 
 	pm2->pm2_int = &pm2xxx_int;
 
@@ -889,14 +1036,6 @@
 
 	pm2->bat = pl_data->battery;
 
-	/*get lpn GPIO from platform data*/
-	if (!pm2->pdata->lpn_gpio) {
-		dev_err(pm2->dev, "no lpn gpio data supplied\n");
-		ret = -EINVAL;
-		goto free_device_info;
-	}
-	pm2->lpn_pin = pm2->pdata->lpn_gpio;
-
 	if (!i2c_check_functionality(i2c_client->adapter,
 			I2C_FUNC_SMBUS_BYTE_DATA |
 			I2C_FUNC_SMBUS_READ_WORD_DATA)) {
@@ -945,6 +1084,10 @@
 	INIT_WORK(&pm2->check_main_thermal_prot_work,
 		pm2xxx_charger_check_main_thermal_prot_work);
 
+	/* Init work for HW failure check */
+	INIT_DEFERRABLE_WORK(&pm2->check_hw_failure_work,
+		pm2xxx_charger_check_hw_failure_work);
+
 	/*
 	 * VDD ADC supply needs to be enabled from this driver when there
 	 * is a charger connected to avoid erroneous BTEMP_HIGH/LOW
@@ -965,40 +1108,72 @@
 	}
 
 	/* Register interrupts */
-	ret = request_threaded_irq(pm2->pdata->irq_number, NULL,
+	ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
+				NULL,
 				pm2xxx_charger_irq[0].isr,
 				pm2->pdata->irq_type,
 				pm2xxx_charger_irq[0].name, pm2);
 
 	if (ret != 0) {
 		dev_err(pm2->dev, "failed to request %s IRQ %d: %d\n",
-		pm2xxx_charger_irq[0].name, pm2->pdata->irq_number, ret);
+		pm2xxx_charger_irq[0].name,
+			gpio_to_irq(pm2->pdata->gpio_irq_number), ret);
 		goto unregister_pm2xxx_charger;
 	}
 
-	/*Initialize lock*/
+	ret = pm_runtime_set_active(pm2->dev);
+	if (ret)
+		dev_err(pm2->dev, "set active Error\n");
+
+	pm_runtime_enable(pm2->dev);
+	pm_runtime_set_autosuspend_delay(pm2->dev, PM2XXX_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(pm2->dev);
+	pm_runtime_resume(pm2->dev);
+
+	/* pm interrupt can wake up system */
+	ret = enable_irq_wake(gpio_to_irq(pm2->pdata->gpio_irq_number));
+	if (ret) {
+		dev_err(pm2->dev, "failed to set irq wake\n");
+		goto unregister_pm2xxx_interrupt;
+	}
+
 	mutex_init(&pm2->lock);
 
-	/*
-	 * Charger detection mechanism requires pulling up the LPN pin
-	 * while i2c communication if Charger is not connected
-	 * LPN pin of PM2301 is GPIO60 of AB9540
-	 */
-	ret = gpio_request(pm2->lpn_pin, "pm2301_lpm_gpio");
-	if (ret < 0) {
-		dev_err(pm2->dev, "pm2301_lpm_gpio request failed\n");
-		goto unregister_pm2xxx_charger;
+	if (gpio_is_valid(pm2->pdata->lpn_gpio)) {
+		/* get lpn GPIO from platform data */
+		pm2->lpn_pin = pm2->pdata->lpn_gpio;
+
+		/*
+		 * The charger detection mechanism requires pulling up the LPN
+		 * pin during i2c communication if no charger is connected.
+		 * The LPN pin of the PM2301 is GPIO60 of the AB9540.
+		 */
+		ret = gpio_request(pm2->lpn_pin, "pm2301_lpm_gpio");
+
+		if (ret < 0) {
+			dev_err(pm2->dev, "pm2301_lpm_gpio request failed\n");
+			goto disable_pm2_irq_wake;
+		}
+		ret = gpio_direction_output(pm2->lpn_pin, 0);
+		if (ret < 0) {
+			dev_err(pm2->dev, "pm2301_lpm_gpio direction failed\n");
+			goto free_gpio;
+		}
+		set_lpn_pin(pm2);
 	}
-	ret = gpio_direction_output(pm2->lpn_pin, 0);
-	if (ret < 0) {
-		dev_err(pm2->dev, "pm2301_lpm_gpio direction failed\n");
-		goto free_gpio;
-	}
+
+	/* read interrupt registers */
+	for (i = 0; i < PM2XXX_NUM_INT_REG; i++)
+		pm2xxx_reg_read(pm2,
+			pm2xxx_interrupt_registers[i],
+			&val);
 
 	ret = pm2xxx_charger_detection(pm2, &val);
 
 	if ((ret == 0) && val) {
 		pm2->ac.charger_connected = 1;
+		ab8500_override_turn_on_stat(~AB8500_POW_KEY_1_ON,
+					     AB8500_MAIN_CH_DET);
 		pm2->ac_conn = true;
 		power_supply_changed(&pm2->ac_chg.psy);
 		sysfs_notify(&pm2->ac_chg.psy.dev->kobj, NULL, "present");
@@ -1007,7 +1182,13 @@
 	return 0;
 
 free_gpio:
-	gpio_free(pm2->lpn_pin);
+	if (gpio_is_valid(pm2->lpn_pin))
+		gpio_free(pm2->lpn_pin);
+disable_pm2_irq_wake:
+	disable_irq_wake(gpio_to_irq(pm2->pdata->gpio_irq_number));
+unregister_pm2xxx_interrupt:
+	/* free the requested interrupt */
+	free_irq(gpio_to_irq(pm2->pdata->gpio_irq_number), pm2);
 unregister_pm2xxx_charger:
 	/* unregister power supply */
 	power_supply_unregister(&pm2->ac_chg.psy);
@@ -1018,18 +1199,24 @@
 	destroy_workqueue(pm2->charger_wq);
 free_device_info:
 	kfree(pm2);
+
 	return ret;
 }
 
-static int __devexit pm2xxx_wall_charger_remove(struct i2c_client *i2c_client)
+static int pm2xxx_wall_charger_remove(struct i2c_client *i2c_client)
 {
 	struct pm2xxx_charger *pm2 = i2c_get_clientdata(i2c_client);
 
+	/* Disable pm_runtime */
+	pm_runtime_disable(pm2->dev);
 	/* Disable AC charging */
 	pm2xxx_charger_ac_en(&pm2->ac_chg, false, 0, 0);
 
+	/* Disable wake by pm interrupt */
+	disable_irq_wake(gpio_to_irq(pm2->pdata->gpio_irq_number));
+
 	/* Disable interrupts */
-	free_irq(pm2->pdata->irq_number, pm2);
+	free_irq(gpio_to_irq(pm2->pdata->gpio_irq_number), pm2);
 
 	/* Delete the work queue */
 	destroy_workqueue(pm2->charger_wq);
@@ -1041,8 +1228,8 @@
 
 	power_supply_unregister(&pm2->ac_chg.psy);
 
-	/*Free GPIO60*/
-	gpio_free(pm2->lpn_pin);
+	if (gpio_is_valid(pm2->lpn_pin))
+		gpio_free(pm2->lpn_pin);
 
 	kfree(pm2);
 
@@ -1058,12 +1245,11 @@
 
 static struct i2c_driver pm2xxx_charger_driver = {
 	.probe = pm2xxx_wall_charger_probe,
-	.remove = __devexit_p(pm2xxx_wall_charger_remove),
-	.suspend = pm2xxx_wall_charger_suspend,
-	.resume = pm2xxx_wall_charger_resume,
+	.remove = pm2xxx_wall_charger_remove,
 	.driver = {
 		.name = "pm2xxx-wall_charger",
 		.owner = THIS_MODULE,
+		.pm = PM2XXX_PM_OPS,
 	},
 	.id_table = pm2xxx_id,
 };
@@ -1078,11 +1264,10 @@
 	i2c_del_driver(&pm2xxx_charger_driver);
 }
 
-subsys_initcall_sync(pm2xxx_charger_init);
+device_initcall_sync(pm2xxx_charger_init);
 module_exit(pm2xxx_charger_exit);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
 MODULE_ALIAS("platform:pm2xxx-charger");
 MODULE_DESCRIPTION("PM2xxx charger management driver");
-
diff --git a/drivers/power/pm2301_charger.h b/drivers/power/pm2301_charger.h
index e6319cd..8ce3cc0 100644
--- a/drivers/power/pm2301_charger.h
+++ b/drivers/power/pm2301_charger.h
@@ -9,27 +9,6 @@
 #ifndef PM2301_CHARGER_H
 #define PM2301_CHARGER_H
 
-#define MAIN_WDOG_ENA			0x01
-#define MAIN_WDOG_KICK			0x02
-#define MAIN_WDOG_DIS			0x00
-#define CHARG_WD_KICK			0x01
-#define MAIN_CH_ENA			0x01
-#define MAIN_CH_NO_OVERSHOOT_ENA_N	0x02
-#define MAIN_CH_DET			0x01
-#define MAIN_CH_CV_ON			0x04
-#define OTP_ENABLE_WD			0x01
-
-#define MAIN_CH_INPUT_CURR_SHIFT	4
-
-#define LED_INDICATOR_PWM_ENA		0x01
-#define LED_INDICATOR_PWM_DIS		0x00
-#define LED_IND_CUR_5MA			0x04
-#define LED_INDICATOR_PWM_DUTY_252_256	0xBF
-
-/* HW failure constants */
-#define MAIN_CH_TH_PROT			0x02
-#define MAIN_CH_NOK			0x01
-
 /* Watchdog timeout constant */
 #define WD_TIMER			0x30 /* 4min */
 #define WD_KICK_INTERVAL		(30 * HZ)
@@ -495,7 +474,6 @@
 	int failure_input_ovv;
 	unsigned int lpn_pin;
 	struct pm2xxx_interrupts *pm2_int;
-	struct ab8500_gpadc *gpadc;
 	struct regulator *regu;
 	struct pm2xxx_bm_data *bat;
 	struct mutex lock;
@@ -506,6 +484,7 @@
 	struct delayed_work check_vbat_work;
 	struct work_struct ac_work;
 	struct work_struct check_main_thermal_prot_work;
+	struct delayed_work check_hw_failure_work;
 	struct ux500_charger ac_chg;
 	struct pm2xxx_charger_event_flags flags;
 };
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 5deac43..1c517c3 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -26,17 +26,42 @@
 
 static struct device_type power_supply_dev_type;
 
+static bool __power_supply_is_supplied_by(struct power_supply *supplier,
+					 struct power_supply *supply)
+{
+	int i;
+
+	if (!supply->supplied_from && !supplier->supplied_to)
+		return false;
+
+	/* Support both supplied_to and supplied_from modes */
+	if (supply->supplied_from) {
+		if (!supplier->name)
+			return false;
+		for (i = 0; i < supply->num_supplies; i++)
+			if (!strcmp(supplier->name, supply->supplied_from[i]))
+				return true;
+	} else {
+		if (!supply->name)
+			return false;
+		for (i = 0; i < supplier->num_supplicants; i++)
+			if (!strcmp(supplier->supplied_to[i], supply->name))
+				return true;
+	}
+
+	return false;
+}
+
 static int __power_supply_changed_work(struct device *dev, void *data)
 {
 	struct power_supply *psy = (struct power_supply *)data;
 	struct power_supply *pst = dev_get_drvdata(dev);
-	int i;
 
-	for (i = 0; i < psy->num_supplicants; i++)
-		if (!strcmp(psy->supplied_to[i], pst->name)) {
-			if (pst->external_power_changed)
-				pst->external_power_changed(pst);
-		}
+	if (__power_supply_is_supplied_by(psy, pst)) {
+		if (pst->external_power_changed)
+			pst->external_power_changed(pst);
+	}
+
 	return 0;
 }
 
@@ -63,22 +88,151 @@
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);
 
+#ifdef CONFIG_OF
+#include <linux/of.h>
+
+static int __power_supply_populate_supplied_from(struct device *dev,
+						 void *data)
+{
+	struct power_supply *psy = (struct power_supply *)data;
+	struct power_supply *epsy = dev_get_drvdata(dev);
+	struct device_node *np;
+	int i = 0;
+
+	do {
+		np = of_parse_phandle(psy->of_node, "power-supplies", i++);
+		if (!np)
+			continue;
+
+		if (np == epsy->of_node) {
+			dev_info(psy->dev, "%s: Found supply : %s\n",
+				psy->name, epsy->name);
+			psy->supplied_from[i-1] = (char *)epsy->name;
+			psy->num_supplies++;
+			break;
+		}
+	} while (np);
+
+	return 0;
+}
+
+static int power_supply_populate_supplied_from(struct power_supply *psy)
+{
+	int error;
+
+	error = class_for_each_device(power_supply_class, NULL, psy,
+				      __power_supply_populate_supplied_from);
+
+	dev_dbg(psy->dev, "%s %d\n", __func__, error);
+
+	return error;
+}
+
+static int __power_supply_find_supply_from_node(struct device *dev,
+						 void *data)
+{
+	struct device_node *np = (struct device_node *)data;
+	struct power_supply *epsy = dev_get_drvdata(dev);
+
+	/* returning an error breaks out of the class_for_each_device loop */
+	if (epsy->of_node == np)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int power_supply_find_supply_from_node(struct device_node *supply_node)
+{
+	int error;
+	struct device *dev;
+	struct class_dev_iter iter;
+
+	/*
+	 * Use iterator to see if any other device is registered.
+	 * This is required since class_for_each_device returns 0
+	 * if there are no devices registered.
+	 */
+	class_dev_iter_init(&iter, power_supply_class, NULL, NULL);
+	dev = class_dev_iter_next(&iter);
+
+	if (!dev)
+		return -EPROBE_DEFER;
+
+	/*
+	 * The return value has to be treated as inverted: returning an
+	 * error on "not found" would stop the iteration too early, so
+	 * the callback instead returns an error once the matching device
+	 * is found and the result is flipped here (see the sketch below).
+	 */
+	error = class_for_each_device(power_supply_class, NULL, supply_node,
+				       __power_supply_find_supply_from_node);
+
+	return error ? 0 : -EPROBE_DEFER;
+}
+
+static int power_supply_check_supplies(struct power_supply *psy)
+{
+	struct device_node *np;
+	int cnt = 0;
+
+	/* If there is already a list, honor it */
+	if (psy->supplied_from && psy->num_supplies > 0)
+		return 0;
+
+	/* No device node found, nothing to do */
+	if (!psy->of_node)
+		return 0;
+
+	do {
+		int ret;
+
+		np = of_parse_phandle(psy->of_node, "power-supplies", cnt++);
+		if (!np)
+			continue;
+
+		ret = power_supply_find_supply_from_node(np);
+		if (ret) {
+			dev_dbg(psy->dev, "Failed to find supply, defer!\n");
+			return -EPROBE_DEFER;
+		}
+	} while (np);
+
+	/* All supplies found, allocate char ** array for filling */
+	psy->supplied_from = devm_kzalloc(psy->dev, sizeof(psy->supplied_from),
+					  GFP_KERNEL);
+	if (!psy->supplied_from) {
+		dev_err(psy->dev, "Couldn't allocate memory for supply list\n");
+		return -ENOMEM;
+	}
+
+	*psy->supplied_from = devm_kzalloc(psy->dev, sizeof(char *) * cnt,
+					   GFP_KERNEL);
+	if (!*psy->supplied_from) {
+		dev_err(psy->dev, "Couldn't allocate memory for supply list\n");
+		return -ENOMEM;
+	}
+
+	return power_supply_populate_supplied_from(psy);
+}
+#else
+static inline int power_supply_check_supplies(struct power_supply *psy)
+{
+	return 0;
+}
+#endif
+
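A minimal sketch (an editorial aside, not part of the patch) of the class_for_each_device() idiom used above: the callback returns a non-zero value as soon as a match is found so the iteration stops early, and the caller inverts the result. The names my_match and my_class_contains, and the use of drvdata as the match key, are hypothetical.

#include <linux/device.h>

static int my_match(struct device *dev, void *data)
{
	/* a non-zero return stops class_for_each_device() early */
	return dev_get_drvdata(dev) == data ? -EINVAL : 0;
}

static bool my_class_contains(struct class *cls, void *needle)
{
	/* non-zero here means the callback found a match and bailed out */
	return class_for_each_device(cls, NULL, needle, my_match) != 0;
}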
 static int __power_supply_am_i_supplied(struct device *dev, void *data)
 {
 	union power_supply_propval ret = {0,};
 	struct power_supply *psy = (struct power_supply *)data;
 	struct power_supply *epsy = dev_get_drvdata(dev);
-	int i;
 
-	for (i = 0; i < epsy->num_supplicants; i++) {
-		if (!strcmp(epsy->supplied_to[i], psy->name)) {
-			if (epsy->get_property(epsy,
-				  POWER_SUPPLY_PROP_ONLINE, &ret))
-				continue;
+	if (__power_supply_is_supplied_by(epsy, psy))
+		if (!epsy->get_property(epsy, POWER_SUPPLY_PROP_ONLINE, &ret)) {
 			if (ret.intval)
 				return ret.intval;
 		}
-	}
+
 	return 0;
 }
 
@@ -336,6 +490,12 @@
 
 	INIT_WORK(&psy->changed_work, power_supply_changed_work);
 
+	rc = power_supply_check_supplies(psy);
+	if (rc) {
+		dev_info(dev, "Not all required supplies found, defer probe\n");
+		goto check_supplies_failed;
+	}
+
 	rc = kobject_set_name(&dev->kobj, "%s", psy->name);
 	if (rc)
 		goto kobject_set_name_failed;
@@ -368,6 +528,7 @@
 	device_del(dev);
 kobject_set_name_failed:
 device_add_failed:
+check_supplies_failed:
 	put_device(dev);
 success:
 	return rc;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 1ae65b8..349e9ae8 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -30,3 +30,10 @@
 	  Some boards don't actually have the ability to power off.
 	  Instead they restart, and u-boot holds the SoC until the
 	  user presses a key. u-boot then boots into Linux.
+
+config POWER_RESET_VEXPRESS
+	bool
+	depends on POWER_RESET
+	help
+	  Power off and reset support for the ARM Ltd. Versatile
+	  Express boards.
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 0f317f5..372807f 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
 obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
-obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
\ No newline at end of file
+obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
+obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o
diff --git a/arch/arm/mach-vexpress/reset.c b/drivers/power/reset/vexpress-poweroff.c
similarity index 92%
rename from arch/arm/mach-vexpress/reset.c
rename to drivers/power/reset/vexpress-poweroff.c
index 465923a..469e696 100644
--- a/arch/arm/mach-vexpress/reset.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -18,6 +18,8 @@
 #include <linux/stat.h>
 #include <linux/vexpress.h>
 
+#include <asm/system_misc.h>
+
 static void vexpress_reset_do(struct device *dev, const char *what)
 {
 	int err = -ENOENT;
@@ -39,14 +41,14 @@
 
 static struct device *vexpress_power_off_device;
 
-void vexpress_power_off(void)
+static void vexpress_power_off(void)
 {
 	vexpress_reset_do(vexpress_power_off_device, "power off");
 }
 
 static struct device *vexpress_restart_device;
 
-void vexpress_restart(char str, const char *cmd)
+static void vexpress_restart(char str, const char *cmd)
 {
 	vexpress_reset_do(vexpress_restart_device, "restart");
 }
@@ -103,14 +105,17 @@
 	switch (func) {
 	case FUNC_SHUTDOWN:
 		vexpress_power_off_device = &pdev->dev;
+		pm_power_off = vexpress_power_off;
 		break;
 	case FUNC_RESET:
 		if (!vexpress_restart_device)
 			vexpress_restart_device = &pdev->dev;
+		arm_pm_restart = vexpress_restart;
 		device_create_file(&pdev->dev, &dev_attr_active);
 		break;
 	case FUNC_REBOOT:
 		vexpress_restart_device = &pdev->dev;
+		arm_pm_restart = vexpress_restart;
 		device_create_file(&pdev->dev, &dev_attr_active);
 		break;
 	};
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index 8208888..1a1dcb8 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -119,7 +119,7 @@
 
 	/* First check for temperature in first direct table */
 	if (raw < ARRAY_SIZE(rx51_temp_table1))
-		return rx51_temp_table1[raw] * 100;
+		return rx51_temp_table1[raw] * 10;
 
 	/* Binary search RAW value in second inverse table */
 	while (max - min > 1) {
@@ -132,7 +132,7 @@
 			break;
 	}
 
-	return (rx51_temp_table2_first - min) * 100;
+	return (rx51_temp_table2_first - min) * 10;
 }
 
 /*
@@ -202,7 +202,7 @@
 	struct rx51_device_info *di;
 	int ret;
 
-	di = kzalloc(sizeof(*di), GFP_KERNEL);
+	di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
 	if (!di)
 		return -ENOMEM;
 
@@ -217,7 +217,6 @@
 	ret = power_supply_register(di->dev, &di->bat);
 	if (ret) {
 		platform_set_drvdata(pdev, NULL);
-		kfree(di);
 		return ret;
 	}
 
@@ -230,7 +229,6 @@
 
 	power_supply_unregister(&di->bat);
 	platform_set_drvdata(pdev, NULL);
-	kfree(di);
 
 	return 0;
 }
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index d2ca989..5948ce0 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -145,14 +145,17 @@
 
 	int new_level;
 	int full_volt;
-	const struct s3c_adc_bat_thresh *lut = bat->pdata->lut_noac;
-	unsigned int lut_size = bat->pdata->lut_noac_cnt;
+	const struct s3c_adc_bat_thresh *lut;
+	unsigned int lut_size;
 
 	if (!bat) {
 		dev_err(psy->dev, "no battery infos ?!\n");
 		return -EINVAL;
 	}
 
+	lut = bat->pdata->lut_noac;
+	lut_size = bat->pdata->lut_noac_cnt;
+
 	if (bat->volt_value < 0 || bat->cur_value < 0 ||
 		jiffies_to_msecs(jiffies - bat->timestamp) >
 			BAT_POLL_INTERVAL) {
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 3960f0b..c8c78a7 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
 
 #include <linux/power/sbs-battery.h>
 
@@ -667,7 +668,6 @@
 	return pdata;
 }
 #else
-#define sbs_dt_ids NULL
 static struct sbs_platform_data *sbs_of_populate_pdata(
 	struct i2c_client *client)
 {
@@ -820,10 +820,11 @@
 	return 0;
 }
 
-#if defined CONFIG_PM
-static int sbs_suspend(struct i2c_client *client,
-	pm_message_t state)
+#if defined CONFIG_PM_SLEEP
+
+static int sbs_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct sbs_info *chip = i2c_get_clientdata(client);
 	s32 ret;
 
@@ -838,11 +839,13 @@
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(sbs_pm_ops, sbs_suspend, NULL);
+#define SBS_PM_OPS (&sbs_pm_ops)
+
 #else
-#define sbs_suspend		NULL
+#define SBS_PM_OPS NULL
 #endif
-/* any smbus transaction will wake up sbs */
-#define sbs_resume		NULL
 
 static const struct i2c_device_id sbs_id[] = {
 	{ "bq20z75", 0 },
@@ -854,12 +857,11 @@
 static struct i2c_driver sbs_battery_driver = {
 	.probe		= sbs_probe,
 	.remove		= sbs_remove,
-	.suspend	= sbs_suspend,
-	.resume		= sbs_resume,
 	.id_table	= sbs_id,
 	.driver = {
 		.name	= "sbs-battery",
-		.of_match_table = sbs_dt_ids,
+		.of_match_table = of_match_ptr(sbs_dt_ids),
+		.pm	= SBS_PM_OPS,
 	},
 };
 module_i2c_driver(sbs_battery_driver);
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c
index b99a452..0152f35 100644
--- a/drivers/power/test_power.c
+++ b/drivers/power/test_power.c
@@ -30,6 +30,8 @@
 static int battery_capacity		= 50;
 static int battery_voltage		= 3300;
 
+static bool module_initialized;
+
 static int test_power_get_ac_property(struct power_supply *psy,
 				      enum power_supply_property psp,
 				      union power_supply_propval *val)
@@ -185,6 +187,7 @@
 		}
 	}
 
+	module_initialized = true;
 	return 0;
 failed:
 	while (--i >= 0)
@@ -209,6 +212,8 @@
 
 	for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
 		power_supply_unregister(&test_power_supplies[i]);
+
+	module_initialized = false;
 }
 module_exit(test_power_exit);
 
@@ -221,8 +226,8 @@
 };
 
 static struct battery_property_map map_ac_online[] = {
-	{ 0,  "on"  },
-	{ 1,  "off" },
+	{ 0,  "off"  },
+	{ 1,  "on" },
 	{ -1, NULL  },
 };
 
@@ -295,10 +300,16 @@
 	return def_key;
 }
 
+static inline void signal_power_supply_changed(struct power_supply *psy)
+{
+	if (module_initialized)
+		power_supply_changed(psy);
+}
+
 static int param_set_ac_online(const char *key, const struct kernel_param *kp)
 {
 	ac_online = map_get_value(map_ac_online, key, ac_online);
-	power_supply_changed(&test_power_supplies[0]);
+	signal_power_supply_changed(&test_power_supplies[0]);
 	return 0;
 }
 
@@ -311,7 +322,7 @@
 static int param_set_usb_online(const char *key, const struct kernel_param *kp)
 {
 	usb_online = map_get_value(map_ac_online, key, usb_online);
-	power_supply_changed(&test_power_supplies[2]);
+	signal_power_supply_changed(&test_power_supplies[2]);
 	return 0;
 }
 
@@ -325,7 +336,7 @@
 					const struct kernel_param *kp)
 {
 	battery_status = map_get_value(map_status, key, battery_status);
-	power_supply_changed(&test_power_supplies[1]);
+	signal_power_supply_changed(&test_power_supplies[1]);
 	return 0;
 }
 
@@ -339,7 +350,7 @@
 					const struct kernel_param *kp)
 {
 	battery_health = map_get_value(map_health, key, battery_health);
-	power_supply_changed(&test_power_supplies[1]);
+	signal_power_supply_changed(&test_power_supplies[1]);
 	return 0;
 }
 
@@ -353,7 +364,7 @@
 					const struct kernel_param *kp)
 {
 	battery_present = map_get_value(map_present, key, battery_present);
-	power_supply_changed(&test_power_supplies[0]);
+	signal_power_supply_changed(&test_power_supplies[0]);
 	return 0;
 }
 
@@ -369,7 +380,7 @@
 {
 	battery_technology = map_get_value(map_technology, key,
 						battery_technology);
-	power_supply_changed(&test_power_supplies[1]);
+	signal_power_supply_changed(&test_power_supplies[1]);
 	return 0;
 }
 
@@ -390,7 +401,7 @@
 		return -EINVAL;
 
 	battery_capacity = tmp;
-	power_supply_changed(&test_power_supplies[1]);
+	signal_power_supply_changed(&test_power_supplies[1]);
 	return 0;
 }
 
@@ -405,7 +416,7 @@
 		return -EINVAL;
 
 	battery_voltage = tmp;
-	power_supply_changed(&test_power_supplies[1]);
+	signal_power_supply_changed(&test_power_supplies[1]);
 	return 0;
 }
 
diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c
new file mode 100644
index 0000000..9fbca31
--- /dev/null
+++ b/drivers/power/tps65090-charger.c
@@ -0,0 +1,320 @@
+/*
+ * Battery charger driver for TI's tps65090
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/mfd/tps65090.h>
+
+#define TPS65090_REG_INTR_STS	0x00
+#define TPS65090_REG_CG_CTRL0	0x04
+#define TPS65090_REG_CG_CTRL1	0x05
+#define TPS65090_REG_CG_CTRL2	0x06
+#define TPS65090_REG_CG_CTRL3	0x07
+#define TPS65090_REG_CG_CTRL4	0x08
+#define TPS65090_REG_CG_CTRL5	0x09
+#define TPS65090_REG_CG_STATUS1	0x0a
+#define TPS65090_REG_CG_STATUS2	0x0b
+
+#define TPS65090_CHARGER_ENABLE	BIT(0)
+#define TPS65090_VACG		BIT(1)
+#define TPS65090_NOITERM	BIT(5)
+
+struct tps65090_charger {
+	struct	device	*dev;
+	int	ac_online;
+	int	prev_ac_online;
+	int	irq;
+	struct power_supply	ac;
+	struct tps65090_platform_data *pdata;
+};
+
+static enum power_supply_property tps65090_ac_props[] = {
+	POWER_SUPPLY_PROP_ONLINE,
+};
+
+static int tps65090_low_chrg_current(struct tps65090_charger *charger)
+{
+	int ret;
+
+	ret = tps65090_write(charger->dev->parent, TPS65090_REG_CG_CTRL5,
+			TPS65090_NOITERM);
+	if (ret < 0) {
+		dev_err(charger->dev, "%s(): error writing to register 0x%x\n",
+			__func__, TPS65090_REG_CG_CTRL5);
+		return ret;
+	}
+	return 0;
+}
+
+static int tps65090_enable_charging(struct tps65090_charger *charger,
+	uint8_t enable)
+{
+	int ret;
+	uint8_t ctrl0 = 0;
+
+	ret = tps65090_read(charger->dev->parent, TPS65090_REG_CG_CTRL0,
+			    &ctrl0);
+	if (ret < 0) {
+		dev_err(charger->dev, "%s(): error reading in register 0x%x\n",
+				__func__, TPS65090_REG_CG_CTRL0);
+		return ret;
+	}
+
+	ret = tps65090_write(charger->dev->parent, TPS65090_REG_CG_CTRL0,
+				(ctrl0 | TPS65090_CHARGER_ENABLE));
+	if (ret < 0) {
+		dev_err(charger->dev, "%s(): error writing to register 0x%x\n",
+				__func__, TPS65090_REG_CG_CTRL0);
+		return ret;
+	}
+	return 0;
+}
+
+static int tps65090_config_charger(struct tps65090_charger *charger)
+{
+	int ret;
+
+	if (charger->pdata->enable_low_current_chrg) {
+		ret = tps65090_low_chrg_current(charger);
+		if (ret < 0) {
+			dev_err(charger->dev,
+				"error configuring low charge current\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int tps65090_ac_get_property(struct power_supply *psy,
+			enum power_supply_property psp,
+			union power_supply_propval *val)
+{
+	struct tps65090_charger *charger = container_of(psy,
+					struct tps65090_charger, ac);
+
+	if (psp == POWER_SUPPLY_PROP_ONLINE) {
+		val->intval = charger->ac_online;
+		charger->prev_ac_online = charger->ac_online;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static irqreturn_t tps65090_charger_isr(int irq, void *dev_id)
+{
+	struct tps65090_charger *charger = dev_id;
+	int ret;
+	uint8_t status1 = 0;
+	uint8_t intrsts = 0;
+
+	ret = tps65090_read(charger->dev->parent, TPS65090_REG_CG_STATUS1,
+			    &status1);
+	if (ret < 0) {
+		dev_err(charger->dev, "%s(): Error in reading reg 0x%x\n",
+				__func__, TPS65090_REG_CG_STATUS1);
+		return IRQ_HANDLED;
+	}
+	msleep(75);
+	ret = tps65090_read(charger->dev->parent, TPS65090_REG_INTR_STS,
+			    &intrsts);
+	if (ret < 0) {
+		dev_err(charger->dev, "%s(): Error in reading reg 0x%x\n",
+				__func__, TPS65090_REG_INTR_STS);
+		return IRQ_HANDLED;
+	}
+
+	if (intrsts & TPS65090_VACG) {
+		ret = tps65090_enable_charging(charger, 1);
+		if (ret < 0)
+			return IRQ_HANDLED;
+		charger->ac_online = 1;
+	} else {
+		charger->ac_online = 0;
+	}
+
+	if (charger->prev_ac_online != charger->ac_online)
+		power_supply_changed(&charger->ac);
+
+	return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_OF)
+
+#include <linux/of_device.h>
+
+static struct tps65090_platform_data *
+		tps65090_parse_dt_charger_data(struct platform_device *pdev)
+{
+	struct tps65090_platform_data *pdata;
+	struct device_node *np = pdev->dev.of_node;
+	unsigned int prop;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev, "Memory alloc for tps65090_pdata failed\n");
+		return NULL;
+	}
+
+	prop = of_property_read_bool(np, "ti,enable-low-current-chrg");
+	pdata->enable_low_current_chrg = prop;
+
+	pdata->irq_base = -1;
+
+	return pdata;
+}
+#else
+static struct tps65090_platform_data *
+		tps65090_parse_dt_charger_data(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif
+
+static int tps65090_charger_probe(struct platform_device *pdev)
+{
+	struct tps65090_charger *cdata;
+	struct tps65090_platform_data *pdata;
+	uint8_t status1 = 0;
+	int ret;
+	int irq;
+
+	pdata = dev_get_platdata(pdev->dev.parent);
+
+	if (!pdata && pdev->dev.of_node)
+		pdata = tps65090_parse_dt_charger_data(pdev);
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "%s(): no platform data available\n",
+				__func__);
+		return -ENODEV;
+	}
+
+	cdata = devm_kzalloc(&pdev->dev, sizeof(*cdata), GFP_KERNEL);
+	if (!cdata) {
+		dev_err(&pdev->dev, "failed to allocate memory for charger data\n");
+		return -ENOMEM;
+	}
+
+	dev_set_drvdata(&pdev->dev, cdata);
+
+	cdata->dev			= &pdev->dev;
+	cdata->pdata			= pdata;
+
+	cdata->ac.name			= "tps65090-ac";
+	cdata->ac.type			= POWER_SUPPLY_TYPE_MAINS;
+	cdata->ac.get_property		= tps65090_ac_get_property;
+	cdata->ac.properties		= tps65090_ac_props;
+	cdata->ac.num_properties	= ARRAY_SIZE(tps65090_ac_props);
+	cdata->ac.supplied_to		= pdata->supplied_to;
+	cdata->ac.num_supplicants	= pdata->num_supplicants;
+
+	ret = power_supply_register(&pdev->dev, &cdata->ac);
+	if (ret) {
+		dev_err(&pdev->dev, "failed: power supply register\n");
+		return ret;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_warn(&pdev->dev, "Unable to get charger irq = %d\n", irq);
+		ret = irq;
+		goto fail_unregister_supply;
+	}
+
+	cdata->irq = irq;
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+		tps65090_charger_isr, 0, "tps65090-charger", cdata);
+	if (ret) {
+		dev_err(cdata->dev, "Unable to register irq %d err %d\n", irq,
+			ret);
+		goto fail_unregister_supply;
+	}
+
+	ret = tps65090_config_charger(cdata);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "charger config failed, err %d\n", ret);
+		goto fail_free_irq;
+	}
+
+	/* Check for charger presence */
+	ret = tps65090_read(cdata->dev->parent, TPS65090_REG_CG_STATUS1,
+			&status1);
+	if (ret < 0) {
+		dev_err(cdata->dev, "%s(): Error in reading reg 0x%x\n", __func__,
+			TPS65090_REG_CG_STATUS1);
+		goto fail_free_irq;
+	}
+
+	if (status1 != 0) {
+		ret = tps65090_enable_charging(cdata, 1);
+		if (ret < 0) {
+			dev_err(cdata->dev, "error enabling charger\n");
+			goto fail_free_irq;
+		}
+		cdata->ac_online = 1;
+		power_supply_changed(&cdata->ac);
+	}
+
+	return 0;
+
+fail_free_irq:
+	devm_free_irq(cdata->dev, irq, cdata);
+fail_unregister_supply:
+	power_supply_unregister(&cdata->ac);
+
+	return ret;
+}
+
+static int tps65090_charger_remove(struct platform_device *pdev)
+{
+	struct tps65090_charger *cdata = dev_get_drvdata(&pdev->dev);
+
+	devm_free_irq(cdata->dev, cdata->irq, cdata);
+	power_supply_unregister(&cdata->ac);
+
+	return 0;
+}
+
+static struct of_device_id of_tps65090_charger_match[] = {
+	{ .compatible = "ti,tps65090-charger", },
+	{ /* end */ }
+};
+
+static struct platform_driver tps65090_charger_driver = {
+	.driver	= {
+		.name	= "tps65090-charger",
+		.of_match_table = of_tps65090_charger_match,
+		.owner	= THIS_MODULE,
+	},
+	.probe	= tps65090_charger_probe,
+	.remove = tps65090_charger_remove,
+};
+module_platform_driver(tps65090_charger_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Syed Rafiuddin <srafiuddin@nvidia.com>");
+MODULE_DESCRIPTION("tps65090 battery charger driver");
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index a69d0d1..bed4581 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -636,17 +636,7 @@
 	.remove	= __exit_p(twl4030_bci_remove),
 };
 
-static int __init twl4030_bci_init(void)
-{
-	return platform_driver_probe(&twl4030_bci_driver, twl4030_bci_probe);
-}
-module_init(twl4030_bci_init);
-
-static void __exit twl4030_bci_exit(void)
-{
-	platform_driver_unregister(&twl4030_bci_driver);
-}
-module_exit(twl4030_bci_exit);
+module_platform_driver_probe(twl4030_bci_driver, twl4030_bci_probe);
 
 MODULE_AUTHOR("Gražvydas Ignotas");
 MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
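The conversion above is behavior-preserving: module_platform_driver_probe() generates essentially the same init/exit pair that the patch removes. A rough, illustrative expansion is shown below; the __init/__exit helper names are produced by the macro rather than spelled out in the source.

static int __init twl4030_bci_driver_init(void)
{
	return platform_driver_probe(&twl4030_bci_driver, twl4030_bci_probe);
}
module_init(twl4030_bci_driver_init);

static void __exit twl4030_bci_driver_exit(void)
{
	platform_driver_unregister(&twl4030_bci_driver);
}
module_exit(twl4030_bci_driver_exit);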
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index d9cc169..58cbb00 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -169,7 +169,8 @@
 	struct power_supply *backup;
 	int ret;
 
-	devdata = kzalloc(sizeof(struct wm831x_backup), GFP_KERNEL);
+	devdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_backup),
+				GFP_KERNEL);
 	if (devdata == NULL)
 		return -ENOMEM;
 
@@ -197,14 +198,8 @@
 	backup->num_properties = ARRAY_SIZE(wm831x_backup_props);
 	backup->get_property = wm831x_backup_get_prop;
 	ret = power_supply_register(&pdev->dev, backup);
-	if (ret)
-		goto err_kmalloc;
 
 	return ret;
-
-err_kmalloc:
-	kfree(devdata);
-	return ret;
 }
 
 static int wm831x_backup_remove(struct platform_device *pdev)
@@ -213,7 +208,6 @@
 
 	power_supply_unregister(&devdata->backup);
 	kfree(devdata->backup.name);
-	kfree(devdata);
 
 	return 0;
 }
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index a6f7190..9323d05 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 30000
+# define AAC_DRIVER_BUILD 30200
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -1918,6 +1918,10 @@
 #define	MONITOR_PANIC			0x00000020
 #define	KERNEL_UP_AND_RUNNING		0x00000080
 #define	KERNEL_PANIC			0x00000100
+#define	FLASH_UPD_PENDING		0x00002000
+#define	FLASH_UPD_SUCCESS		0x00004000
+#define	FLASH_UPD_FAILED		0x00008000
+#define	FWUPD_TIMEOUT			(5 * 60)
 
 /*
  *	Doorbell bit defines
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 3f75995..177b094 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -214,7 +214,7 @@
 	cmd = (struct aac_close *) fib_data(fibctx);
 
 	cmd->command = cpu_to_le32(VM_CloseAll);
-	cmd->cid = cpu_to_le32(0xffffffff);
+	cmd->cid = cpu_to_le32(0xfffffffe);
 
 	status = aac_fib_send(ContainerCommand,
 			  fibctx,
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index e2e3492..0f56d8d 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -703,6 +703,28 @@
 		!aac_src_restart_adapter(dev, 0))
 		++restart;
 	/*
+	 *	Check to see if flash update is running.
+	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & FLASH_UPD_PENDING) {
+		start = jiffies;
+		do {
+			status = src_readl(dev, MUnit.OMR);
+			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
+				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
+					dev->name, instance);
+				goto error_iounmap;
+			}
+		} while (!(status & FLASH_UPD_SUCCESS) &&
+			 !(status & FLASH_UPD_FAILED));
+		/*
+		 * Delay 10 seconds: the firmware is performing a soft
+		 * reset right now, so do not read the scratch pad
+		 * register at this time.
+		 */
+		ssleep(10);
+	}
+	/*
 	 *	Check to see if the board panic'd while booting.
 	 */
 	status = src_readl(dev, MUnit.OMR);
@@ -730,7 +752,9 @@
 	/*
 	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
 	 */
-	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) {
+	while (!((status = src_readl(dev, MUnit.OMR)) &
+		KERNEL_UP_AND_RUNNING) ||
+		status == 0xffffffff) {
 		if ((restart &&
 		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
 		  time_after(jiffies, start+HZ*startup_timeout)) {
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index bdd78fb..7dbaf58 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -3892,7 +3892,6 @@
 			  struct csio_fl_dma_buf *flb, void *priv)
 {
 	__u8 op;
-	__be64 *data;
 	void *msg = NULL;
 	uint32_t msg_len = 0;
 	bool msg_sg = 0;
@@ -3908,8 +3907,6 @@
 		msg = (void *) flb;
 		msg_len = flb->totlen;
 		msg_sg = 1;
-
-		data = (__be64 *) msg;
 	} else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
 
 		CSIO_INC_STATS(hw, n_cpl_fw6_msg);
@@ -3917,8 +3914,6 @@
 		msg = (void *)((uintptr_t)wr + sizeof(__be64));
 		msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
 			   sizeof(struct cpl_fw4_msg);
-
-		data = (__be64 *) msg;
 	} else {
 		csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
 		CSIO_INC_STATS(hw, n_cpl_unexp);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6f4d8e6..68adb89 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -232,13 +232,13 @@
 	struct scsi_sense_hdr sense_hdr;
 	unsigned err = SCSI_DH_OK;
 
-	if (error || host_byte(req->errors) != DID_OK ||
-			msg_byte(req->errors) != COMMAND_COMPLETE) {
+	if (host_byte(req->errors) != DID_OK ||
+	    msg_byte(req->errors) != COMMAND_COMPLETE) {
 		err = SCSI_DH_IO;
 		goto done;
 	}
 
-	if (h->senselen > 0) {
+	if (req->sense_len > 0) {
 		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
 					   &sense_hdr);
 		if (!err) {
@@ -255,7 +255,9 @@
 			    ALUA_DH_NAME, sense_hdr.sense_key,
 			    sense_hdr.asc, sense_hdr.ascq);
 		err = SCSI_DH_IO;
-	}
+	} else if (error)
+		err = SCSI_DH_IO;
+
 	if (err == SCSI_DH_OK) {
 		h->state = TPGS_STATE_OPTIMIZED;
 		sdev_printk(KERN_INFO, h->sdev,
@@ -710,6 +712,10 @@
 	return result;
 }
 
+static uint optimize_stpg;
+module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
+
 /*
  * alua_activate - activate a path
  * @sdev: device on the path to be activated
@@ -731,6 +737,9 @@
 	if (err != SCSI_DH_OK)
 		goto out;
 
+	if (optimize_stpg)
+		h->flags |= ALUA_OPTIMIZE_STPG;
+
 	if (h->tpgs & TPGS_MODE_EXPLICIT) {
 		switch (h->state) {
 		case TPGS_STATE_NONOPTIMIZED:
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 2839baa..d25d0d8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -721,7 +721,7 @@
 	}
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int isci_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -770,18 +770,16 @@
 
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(isci_pm_ops, isci_suspend, isci_resume);
-#endif
 
 static struct pci_driver isci_pci_driver = {
 	.name		= DRV_NAME,
 	.id_table	= isci_id_table,
 	.probe		= isci_pci_probe,
 	.remove		= isci_pci_remove,
-#ifdef CONFIG_PM
 	.driver.pm      = &isci_pm_ops,
-#endif
 };
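A minimal sketch (not from the patch) of the pattern the isci change adopts: the sleep callbacks stay under CONFIG_PM_SLEEP, while SIMPLE_DEV_PM_OPS() always emits a dev_pm_ops (empty when sleep support is compiled out), so .driver.pm can be assigned unconditionally. The names my_suspend, my_resume and my_pci_driver are hypothetical.

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int my_suspend(struct device *dev)
{
	return 0;	/* quiesce the device here */
}

static int my_resume(struct device *dev)
{
	return 0;	/* re-initialize the device here */
}
#endif

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_pci_driver = {
	.name		= "my-driver",
	.driver.pm	= &my_pm_ops,
};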
 
 static __init int isci_init(void)
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 1b91ca0..9e2588a 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -370,17 +370,24 @@
 static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
 {
 	struct iscsi_conn *conn = task->conn;
-	int rc;
+	unsigned long pflags = current->flags;
+	int rc = 0;
+
+	current->flags |= PF_MEMALLOC;
 
 	while (iscsi_sw_tcp_xmit_qlen(conn)) {
 		rc = iscsi_sw_tcp_xmit(conn);
-		if (rc == 0)
-			return -EAGAIN;
+		if (rc == 0) {
+			rc = -EAGAIN;
+			break;
+		}
 		if (rc < 0)
-			return rc;
+			break;
+		rc = 0;
 	}
 
-	return 0;
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
+	return rc;
 }
 
 /*
@@ -665,6 +672,7 @@
 	sk->sk_reuse = SK_CAN_REUSE;
 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
 	sk->sk_allocation = GFP_ATOMIC;
+	sk_set_memalloc(sk);
 
 	iscsi_sw_tcp_conn_set_callbacks(conn);
 	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 82c3fd4..5de9469 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -507,7 +507,6 @@
 	kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*));
 
 	if (sc) {
-		task->sc = NULL;
 		/* SCSI eh reuses commands to verify us */
 		sc->SCp.ptr = NULL;
 		/*
@@ -3142,7 +3141,7 @@
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
 
-static int iscsi_switch_str_param(char **param, char *new_val_buf)
+int iscsi_switch_str_param(char **param, char *new_val_buf)
 {
 	char *new_val;
 
@@ -3159,6 +3158,7 @@
 	*param = new_val;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(iscsi_switch_str_param);
 
 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
 		    enum iscsi_param param, char *buf, int buflen)
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index a364cae..9290713 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -692,7 +692,7 @@
 	 */
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-		while (pring->txcmplq_cnt) {
+		while (!list_empty(&pring->txcmplq)) {
 			msleep(10);
 			if (cnt++ > 500) {  /* 5 secs */
 				lpfc_printf_log(phba,
@@ -2302,11 +2302,17 @@
 LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2,
 	"FCF Fast failover=1 Priority failover=2");
 
-int lpfc_enable_rrq;
+int lpfc_enable_rrq = 2;
 module_param(lpfc_enable_rrq, int, S_IRUGO);
 MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
 lpfc_param_show(enable_rrq);
-lpfc_param_init(enable_rrq, 0, 0, 1);
+/*
+# lpfc_enable_rrq: Track XRI/OXID reuse after IO failures
+#	0x0 = disabled, XRI/OXID use not tracked.
+#	0x1 = XRI/OXID reuse is timed with ratov, RRQ sent.
+#	0x2 = XRI/OXID reuse is timed with ratov, No RRQ sent.
+*/
+lpfc_param_init(enable_rrq, 2, 0, 2);
 static DEVICE_ATTR(lpfc_enable_rrq, S_IRUGO, lpfc_enable_rrq_show, NULL);
 
 /*
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d1064..8886668 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -64,18 +64,14 @@
 	struct list_head events_to_get;
 	struct list_head events_to_see;
 
-	/* job waiting for this event to finish */
-	struct fc_bsg_job *set_job;
+	/* driver data associated with the job */
+	void *dd_data;
 };
 
 struct lpfc_bsg_iocb {
 	struct lpfc_iocbq *cmdiocbq;
-	struct lpfc_iocbq *rspiocbq;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *rmp;
 	struct lpfc_nodelist *ndlp;
-
-	/* job waiting for this iocb to finish */
-	struct fc_bsg_job *set_job;
 };
 
 struct lpfc_bsg_mbox {
@@ -86,20 +82,13 @@
 	uint32_t mbOffset; /* from app */
 	uint32_t inExtWLen; /* from app */
 	uint32_t outExtWLen; /* from app */
-
-	/* job waiting for this mbox command to finish */
-	struct fc_bsg_job *set_job;
 };
 
 #define MENLO_DID 0x0000FC0E
 
 struct lpfc_bsg_menlo {
 	struct lpfc_iocbq *cmdiocbq;
-	struct lpfc_iocbq *rspiocbq;
-	struct lpfc_dmabuf *bmp;
-
-	/* job waiting for this iocb to finish */
-	struct fc_bsg_job *set_job;
+	struct lpfc_dmabuf *rmp;
 };
 
 #define TYPE_EVT 	1
@@ -108,6 +97,7 @@
 #define TYPE_MENLO	4
 struct bsg_job_data {
 	uint32_t type;
+	struct fc_bsg_job *set_job; /* job waiting for this iocb to finish */
 	union {
 		struct lpfc_bsg_event *evt;
 		struct lpfc_bsg_iocb iocb;
@@ -141,6 +131,138 @@
 	uint32_t flag;
 };
 
+static void
+lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
+{
+	struct lpfc_dmabuf *mlast, *next_mlast;
+
+	if (mlist) {
+		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
+					 list) {
+			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
+			list_del(&mlast->list);
+			kfree(mlast);
+		}
+		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
+		kfree(mlist);
+	}
+	return;
+}
+
+static struct lpfc_dmabuf *
+lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
+		       int outbound_buffers, struct ulp_bde64 *bpl,
+		       int *bpl_entries)
+{
+	struct lpfc_dmabuf *mlist = NULL;
+	struct lpfc_dmabuf *mp;
+	unsigned int bytes_left = size;
+
+	/* Verify we can support the size specified */
+	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
+		return NULL;
+
+	/* Determine the number of dma buffers to allocate */
+	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
+			size/LPFC_BPL_SIZE);
+
+	/* Allocate dma buffer and place in BPL passed */
+	while (bytes_left) {
+		/* Allocate dma buffer  */
+		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!mp) {
+			if (mlist)
+				lpfc_free_bsg_buffers(phba, mlist);
+			return NULL;
+		}
+
+		INIT_LIST_HEAD(&mp->list);
+		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
+
+		if (!mp->virt) {
+			kfree(mp);
+			if (mlist)
+				lpfc_free_bsg_buffers(phba, mlist);
+			return NULL;
+		}
+
+		/* Queue it to a linked list */
+		if (!mlist)
+			mlist = mp;
+		else
+			list_add_tail(&mp->list, &mlist->list);
+
+		/* Add buffer to buffer pointer list */
+		if (outbound_buffers)
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		else
+			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
+		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
+		bpl->tus.f.bdeSize = (uint16_t)
+			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
+			 bytes_left);
+		bytes_left -= bpl->tus.f.bdeSize;
+		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+		bpl++;
+	}
+	return mlist;
+}
+
+static unsigned int
+lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
+		   struct fc_bsg_buffer *bsg_buffers,
+		   unsigned int bytes_to_transfer, int to_buffers)
+{
+
+	struct lpfc_dmabuf *mp;
+	unsigned int transfer_bytes, bytes_copied = 0;
+	unsigned int sg_offset, dma_offset;
+	unsigned char *dma_address, *sg_address;
+	struct scatterlist *sgel;
+	LIST_HEAD(temp_list);
+
+
+	list_splice_init(&dma_buffers->list, &temp_list);
+	list_add(&dma_buffers->list, &temp_list);
+	sg_offset = 0;
+	sgel = bsg_buffers->sg_list;
+	list_for_each_entry(mp, &temp_list, list) {
+		dma_offset = 0;
+		while (bytes_to_transfer && sgel &&
+		       (dma_offset < LPFC_BPL_SIZE)) {
+			dma_address = mp->virt + dma_offset;
+			if (sg_offset) {
+				/* Continue previous partial transfer of sg */
+				sg_address = sg_virt(sgel) + sg_offset;
+				transfer_bytes = sgel->length - sg_offset;
+			} else {
+				sg_address = sg_virt(sgel);
+				transfer_bytes = sgel->length;
+			}
+			if (bytes_to_transfer < transfer_bytes)
+				transfer_bytes = bytes_to_transfer;
+			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
+				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
+			if (to_buffers)
+				memcpy(dma_address, sg_address, transfer_bytes);
+			else
+				memcpy(sg_address, dma_address, transfer_bytes);
+			dma_offset += transfer_bytes;
+			sg_offset += transfer_bytes;
+			bytes_to_transfer -= transfer_bytes;
+			bytes_copied += transfer_bytes;
+			if (sg_offset >= sgel->length) {
+				sg_offset = 0;
+				sgel = sg_next(sgel);
+			}
+		}
+	}
+	list_del_init(&dma_buffers->list);
+	list_splice(&temp_list, &dma_buffers->list);
+	return bytes_copied;
+}
+
 /**
  * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
  * @phba: Pointer to HBA context object.
@@ -166,62 +288,72 @@
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *bmp, *cmp, *rmp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_bsg_iocb *iocb;
 	unsigned long flags;
+	unsigned int rsp_size;
 	int rc = 0;
 
+	dd_data = cmdiocbq->context1;
+
+	/* Determine if job has been aborted */
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context2;
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		lpfc_sli_release_iocbq(phba, cmdiocbq);
-		return;
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job */
+		job->dd_data = NULL;
 	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	iocb = &dd_data->context_un.iocb;
-	job = iocb->set_job;
-	job->dd_data = NULL; /* so timeout handler does not reply */
-
-	bmp = iocb->bmp;
+	ndlp = iocb->ndlp;
+	rmp = iocb->rmp;
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
 	rsp = &rspiocbq->iocb;
-	ndlp = cmdiocbq->context1;
 
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	/* Copy the completed data or set the error status */
 
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
+	if (job) {
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
 				rc = -EACCES;
-				break;
 			}
-		} else
-			rc = -EACCES;
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+		} else {
+			rsp_size = rsp->un.genreq64.bdl.bdeSize;
+			job->reply->reply_payload_rcv_len =
+				lpfc_bsg_copy_data(rmp, &job->reply_payload,
+						   rsp_size, 0);
+		}
+	}
 
+	lpfc_free_bsg_buffers(phba, cmp);
+	lpfc_free_bsg_buffers(phba, rmp);
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
-	kfree(bmp);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
 	return;
 }
 
@@ -240,12 +372,9 @@
 	uint32_t timeout;
 	struct lpfc_iocbq *cmdiocbq = NULL;
 	IOCB_t *cmd;
-	struct lpfc_dmabuf *bmp = NULL;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
 	int request_nseg;
 	int reply_nseg;
-	struct scatterlist *sgel = NULL;
-	int numbde;
-	dma_addr_t busaddr;
 	struct bsg_job_data *dd_data;
 	uint32_t creg_val;
 	int rc = 0;
@@ -268,54 +397,50 @@
 		goto no_ndlp;
 	}
 
-	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
-	if (!bmp) {
-		rc = -ENOMEM;
-		goto free_ndlp;
-	}
-
 	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
 		rc = -ENODEV;
-		goto free_bmp;
+		goto free_ndlp;
 	}
 
 	cmdiocbq = lpfc_sli_get_iocbq(phba);
 	if (!cmdiocbq) {
 		rc = -ENOMEM;
-		goto free_bmp;
+		goto free_ndlp;
 	}
 
 	cmd = &cmdiocbq->iocb;
-	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
-	if (!bmp->virt) {
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
 		rc = -ENOMEM;
 		goto free_cmdiocbq;
 	}
-
-	INIT_LIST_HEAD(&bmp->list);
-	bpl = (struct ulp_bde64 *) bmp->virt;
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_bmp;
 	}
 
-	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	INIT_LIST_HEAD(&bmp->list);
+
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &request_nseg);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
+
+	bpl += request_nseg;
+	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+				     bpl, &reply_nseg);
+	if (!rmp) {
+		rc = -ENOMEM;
+		goto free_cmp;
 	}
 
 	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
@@ -343,17 +468,20 @@
 	cmd->ulpTimeout = timeout;
 
 	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
-	cmdiocbq->context1 = ndlp;
-	cmdiocbq->context2 = dd_data;
+	cmdiocbq->context1 = dd_data;
+	cmdiocbq->context2 = cmp;
+	cmdiocbq->context3 = bmp;
 	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-	dd_data->context_un.iocb.set_job = job;
-	dd_data->context_un.iocb.bmp = bmp;
+	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = rmp;
+	job->dd_data = dd_data;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
 			rc = -EIO ;
-			goto free_cmdiocbq;
+			goto free_rmp;
 		}
 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
 		writel(creg_val, phba->HCregaddr);
@@ -368,19 +496,18 @@
 	else
 		rc = -EIO;
 
-
 	/* iocb failed so cleanup */
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-
+free_rmp:
+	lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+	lpfc_free_bsg_buffers(phba, cmp);
+free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
 free_cmdiocbq:
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
-free_bmp:
-	kfree(bmp);
 free_ndlp:
 	lpfc_nlp_put(ndlp);
 no_ndlp:
@@ -418,67 +545,68 @@
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
 	struct lpfc_nodelist *ndlp;
-	struct lpfc_dmabuf *pbuflist = NULL;
+	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
 	struct fc_bsg_ctels_reply *els_reply;
 	uint8_t *rjt_data;
 	unsigned long flags;
+	unsigned int rsp_size;
 	int rc = 0;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = cmdiocbq->context1;
-	/* normal completion and timeout crossed paths, already done */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
+	ndlp = dd_data->context_un.iocb.ndlp;
+	cmdiocbq->context1 = ndlp;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	rsp = &rspiocbq->iocb;
+	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
+	prsp = (struct lpfc_dmabuf *)pcmd->list.next;
+
+	/* Copy the completed job data or determine the job status if job is
+	 * still active
+	 */
+
+	if (job) {
+		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
+			job->reply->reply_payload_rcv_len =
+				sg_copy_from_buffer(job->reply_payload.sg_list,
+						    job->reply_payload.sg_cnt,
+						    prsp->virt,
+						    rsp_size);
+		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+			job->reply->reply_payload_rcv_len =
+				sizeof(struct fc_bsg_ctels_reply);
+			/* LS_RJT data returned in word 4 */
+			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+			els_reply = &job->reply->reply_data.ctels_reply;
+			els_reply->status = FC_CTELS_STATUS_REJECT;
+			els_reply->rjt_data.action = rjt_data[3];
+			els_reply->rjt_data.reason_code = rjt_data[2];
+			els_reply->rjt_data.reason_explanation = rjt_data[1];
+			els_reply->rjt_data.vendor_unique = rjt_data[0];
+		} else {
+			rc = -EIO;
+		}
 	}
 
-	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
-	if (cmdiocbq->context2 && rspiocbq)
-		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
-		       &rspiocbq->iocb, sizeof(IOCB_t));
-
-	job = dd_data->context_un.iocb.set_job;
-	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
-	rspiocbq = dd_data->context_un.iocb.rspiocbq;
-	rsp = &rspiocbq->iocb;
-	ndlp = dd_data->context_un.iocb.ndlp;
-
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-	if (job->reply->result == -EAGAIN)
-		rc = -EAGAIN;
-	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
-		job->reply->reply_payload_rcv_len =
-			rsp->un.elsreq64.bdl.bdeSize;
-	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
-		job->reply->reply_payload_rcv_len =
-			sizeof(struct fc_bsg_ctels_reply);
-		/* LS_RJT data returned in word 4 */
-		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
-		els_reply = &job->reply->reply_data.ctels_reply;
-		els_reply->status = FC_CTELS_STATUS_REJECT;
-		els_reply->rjt_data.action = rjt_data[3];
-		els_reply->rjt_data.reason_code = rjt_data[2];
-		els_reply->rjt_data.reason_explanation = rjt_data[1];
-		els_reply->rjt_data.vendor_unique = rjt_data[0];
-	} else
-		rc = -EIO;
-
-	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
-	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
-	lpfc_sli_release_iocbq(phba, rspiocbq);
-	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
+	lpfc_els_free_iocb(phba, cmdiocbq);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	job->dd_data = NULL;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
 	return;
 }
 
@@ -496,19 +624,8 @@
 	uint32_t elscmd;
 	uint32_t cmdsize;
 	uint32_t rspsize;
-	struct lpfc_iocbq *rspiocbq;
 	struct lpfc_iocbq *cmdiocbq;
-	IOCB_t *rsp;
 	uint16_t rpi = 0;
-	struct lpfc_dmabuf *pcmd;
-	struct lpfc_dmabuf *prsp;
-	struct lpfc_dmabuf *pbuflist = NULL;
-	struct ulp_bde64 *bpl;
-	int request_nseg;
-	int reply_nseg;
-	struct scatterlist *sgel = NULL;
-	int numbde;
-	dma_addr_t busaddr;
 	struct bsg_job_data *dd_data;
 	uint32_t creg_val;
 	int rc = 0;
@@ -516,6 +633,15 @@
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
+	/* verify the ELS command payload is not greater than the
+	 * maximum ELS transfer size.
+	 */
+
+	if (job->request_payload.payload_len > FCELSSIZE) {
+		rc = -EINVAL;
+		goto no_dd_data;
+	}
+
 	/* allocate our bsg tracking structure */
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 	if (!dd_data) {
@@ -525,88 +651,51 @@
 		goto no_dd_data;
 	}
 
+	elscmd = job->request->rqst_data.r_els.els_code;
+	cmdsize = job->request_payload.payload_len;
+	rspsize = job->reply_payload.payload_len;
+
 	if (!lpfc_nlp_get(ndlp)) {
 		rc = -ENODEV;
 		goto free_dd_data;
 	}
 
-	elscmd = job->request->rqst_data.r_els.els_code;
-	cmdsize = job->request_payload.payload_len;
-	rspsize = job->reply_payload.payload_len;
-	rspiocbq = lpfc_sli_get_iocbq(phba);
-	if (!rspiocbq) {
-		lpfc_nlp_put(ndlp);
-		rc = -ENOMEM;
-		goto free_dd_data;
-	}
-
-	rsp = &rspiocbq->iocb;
-	rpi = ndlp->nlp_rpi;
+	/* We will use the dma buffers allocated by lpfc_prep_els_iocb for the
+	 * command and response, so that if the job times out and the request
+	 * is freed we will not DMA into memory that is no longer allocated
+	 * to the request.
+	 */
 
 	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
 				      ndlp->nlp_DID, elscmd);
 	if (!cmdiocbq) {
 		rc = -EIO;
-		goto free_rspiocbq;
+		goto release_ndlp;
 	}
 
-	/* prep els iocb set context1 to the ndlp, context2 to the command
-	 * dmabuf, context3 holds the data dmabuf
-	 */
-	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
-	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
-	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
-	kfree(pcmd);
-	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
-	kfree(prsp);
-	cmdiocbq->context2 = NULL;
+	rpi = ndlp->nlp_rpi;
 
-	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
-	bpl = (struct ulp_bde64 *) pbuflist->virt;
+	/* Transfer the request payload to allocated command dma buffer */
 
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
-	}
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			  job->request_payload.sg_cnt,
+			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
+			  cmdsize);
 
-	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
-	}
-	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
-		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
 	if (phba->sli_rev == LPFC_SLI_REV4)
 		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
 	else
 		cmdiocbq->iocb.ulpContext = rpi;
 	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
-	cmdiocbq->context1 = NULL;
-	cmdiocbq->context2 = NULL;
-
-	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
 	cmdiocbq->context1 = dd_data;
 	cmdiocbq->context_un.ndlp = ndlp;
-	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
 	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
-	dd_data->context_un.iocb.rspiocbq = rspiocbq;
-	dd_data->context_un.iocb.set_job = job;
-	dd_data->context_un.iocb.bmp = NULL;
 	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = NULL;
+	job->dd_data = dd_data;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -617,8 +706,9 @@
 		writel(creg_val, phba->HCregaddr);
 		readl(phba->HCregaddr); /* flush */
 	}
+
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
-	lpfc_nlp_put(ndlp);
+
 	if (rc == IOCB_SUCCESS)
 		return 0; /* done for now */
 	else if (rc == IOCB_BUSY)
@@ -627,17 +717,12 @@
 		rc = -EIO;
 
 linkdown_err:
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
 
-	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
+	cmdiocbq->context1 = ndlp;
+	lpfc_els_free_iocb(phba, cmdiocbq);
 
-	lpfc_sli_release_iocbq(phba, cmdiocbq);
-
-free_rspiocbq:
-	lpfc_sli_release_iocbq(phba, rspiocbq);
+release_ndlp:
+	lpfc_nlp_put(ndlp);
 
 free_dd_data:
 	kfree(dd_data);
@@ -680,6 +765,7 @@
 		kfree(ed);
 	}
 
+	kfree(evt->dd_data);
 	kfree(evt);
 }
 
@@ -723,6 +809,7 @@
 	evt->req_id = ev_req_id;
 	evt->reg_id = ev_reg_id;
 	evt->wait_time_stamp = jiffies;
+	evt->dd_data = NULL;
 	init_waitqueue_head(&evt->wq);
 	kref_init(&evt->kref);
 	return evt;
@@ -790,6 +877,7 @@
 	struct lpfc_hbq_entry *hbqe;
 	struct lpfc_sli_ct_request *ct_req;
 	struct fc_bsg_job *job = NULL;
+	struct bsg_job_data *dd_data = NULL;
 	unsigned long flags;
 	int size = 0;
 
@@ -986,10 +1074,11 @@
 		}
 
 		list_move(evt->events_to_see.prev, &evt->events_to_get);
-		lpfc_bsg_event_unref(evt);
 
-		job = evt->set_job;
-		evt->set_job = NULL;
+		dd_data = (struct bsg_job_data *)evt->dd_data;
+		job = dd_data->set_job;
+		dd_data->set_job = NULL;
+		lpfc_bsg_event_unref(evt);
 		if (job) {
 			job->reply->reply_payload_rcv_len = size;
 			/* make error code available to userspace */
@@ -1078,14 +1167,6 @@
 		goto job_error;
 	}
 
-	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
-	if (dd_data == NULL) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
-				"2734 Failed allocation of dd_data\n");
-		rc = -ENOMEM;
-		goto job_error;
-	}
-
 	event_req = (struct set_ct_event *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
@@ -1095,6 +1176,7 @@
 		if (evt->reg_id == event_req->ev_reg_id) {
 			lpfc_bsg_event_ref(evt);
 			evt->wait_time_stamp = jiffies;
+			dd_data = (struct bsg_job_data *)evt->dd_data;
 			break;
 		}
 	}
@@ -1102,6 +1184,13 @@
 
 	if (&evt->node == &phba->ct_ev_waiters) {
 		/* no event waiting struct yet - first call */
+		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+		if (dd_data == NULL) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2734 Failed allocation of dd_data\n");
+			rc = -ENOMEM;
+			goto job_error;
+		}
 		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
 					event_req->ev_req_id);
 		if (!evt) {
@@ -1111,7 +1200,10 @@
 			rc = -ENOMEM;
 			goto job_error;
 		}
-
+		dd_data->type = TYPE_EVT;
+		dd_data->set_job = NULL;
+		dd_data->context_un.evt = evt;
+		evt->dd_data = (void *)dd_data;
 		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		list_add(&evt->node, &phba->ct_ev_waiters);
 		lpfc_bsg_event_ref(evt);
@@ -1121,9 +1213,7 @@
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	evt->waiting = 1;
-	dd_data->type = TYPE_EVT;
-	dd_data->context_un.evt = evt;
-	evt->set_job = job; /* for unsolicited command */
+	dd_data->set_job = job; /* for unsolicited command */
 	job->dd_data = dd_data; /* for fc transport timeout callback*/
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	return 0; /* call job done later */
@@ -1252,57 +1342,64 @@
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *bmp, *cmp;
 	struct lpfc_nodelist *ndlp;
 	unsigned long flags;
 	int rc = 0;
 
+	dd_data = cmdiocbq->context1;
+
+	/* Determine if job has been aborted */
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
-	dd_data = cmdiocbq->context2;
-	/* normal completion and timeout crossed paths, already done */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	ndlp = dd_data->context_un.iocb.ndlp;
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
+	rsp = &rspiocbq->iocb;
+
+	/* Copy the completed job data or set the error status */
+
+	if (job) {
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
+				rc = -EACCES;
+			}
+		} else {
+			job->reply->reply_payload_rcv_len = 0;
+		}
 	}
 
-	job = dd_data->context_un.iocb.set_job;
-	bmp = dd_data->context_un.iocb.bmp;
-	rsp = &rspiocbq->iocb;
-	ndlp = dd_data->context_un.iocb.ndlp;
-
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
-				rc = -EACCES;
-				break;
-			}
-		} else
-			rc = -EACCES;
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
-
+	lpfc_free_bsg_buffers(phba, cmp);
 	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	kfree(bmp);
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
 	lpfc_nlp_put(ndlp);
-	kfree(bmp);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	job->dd_data = NULL;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if the job is still active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
 	return;
 }
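
The ulpStatus-to-errno mapping above recurs in several of the reworked completion handlers in this file. Purely for illustration, the same mapping written as a helper (hypothetical; the driver keeps the switch open-coded, only the constants come from the hunks above):

static int example_iocb_status_to_errno(IOCB_t *rsp)
{
	if (!rsp->ulpStatus)
		return 0;
	if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
		/* Word 4 carries the local-reject reason */
		switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
		case IOERR_SEQUENCE_TIMEOUT:
			return -ETIMEDOUT;
		case IOERR_INVALID_RPI:
			return -EFAULT;
		default:
			return -EACCES;
		}
	}
	return -EACCES;
}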
 
@@ -1316,7 +1413,8 @@
  **/
 static int
 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
-		  struct lpfc_dmabuf *bmp, int num_entry)
+		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
+		  int num_entry)
 {
 	IOCB_t *icmd;
 	struct lpfc_iocbq *ctiocb = NULL;
@@ -1377,7 +1475,7 @@
 
 		/* Check if the ndlp is active */
 		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
-			rc = -IOCB_ERROR;
+			rc = IOCB_ERROR;
 			goto issue_ct_rsp_exit;
 		}
 
@@ -1385,7 +1483,7 @@
 		 * we respond
 		 */
 		if (!lpfc_nlp_get(ndlp)) {
-			rc = -IOCB_ERROR;
+			rc = IOCB_ERROR;
 			goto issue_ct_rsp_exit;
 		}
 
@@ -1407,17 +1505,17 @@
 	ctiocb->iocb_cmpl = NULL;
 	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
 	ctiocb->vport = phba->pport;
+	ctiocb->context1 = dd_data;
+	ctiocb->context2 = cmp;
 	ctiocb->context3 = bmp;
-
 	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
-	ctiocb->context2 = dd_data;
-	ctiocb->context1 = ndlp;
+
 	dd_data->type = TYPE_IOCB;
+	dd_data->set_job = job;
 	dd_data->context_un.iocb.cmdiocbq = ctiocb;
-	dd_data->context_un.iocb.rspiocbq = NULL;
-	dd_data->context_un.iocb.set_job = job;
-	dd_data->context_un.iocb.bmp = bmp;
 	dd_data->context_un.iocb.ndlp = ndlp;
+	dd_data->context_un.iocb.rmp = NULL;
+	job->dd_data = dd_data;
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
 		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
@@ -1454,11 +1552,8 @@
 	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 	struct ulp_bde64 *bpl;
-	struct lpfc_dmabuf *bmp = NULL;
-	struct scatterlist *sgel = NULL;
-	int request_nseg;
-	int numbde;
-	dma_addr_t busaddr;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
+	int bpl_entries;
 	uint32_t tag = mgmt_resp->tag;
 	unsigned long reqbfrcnt =
 			(unsigned long)job->request_payload.payload_len;
@@ -1486,30 +1581,28 @@
 
 	INIT_LIST_HEAD(&bmp->list);
 	bpl = (struct ulp_bde64 *) bmp->virt;
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &bpl_entries);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto send_mgmt_rsp_free_bmp;
 	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
 
-	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
+	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
 
 	if (rc == IOCB_SUCCESS)
 		return 0; /* done for now */
 
-	/* TBD need to handle a timeout */
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-			  job->request_payload.sg_cnt, DMA_TO_DEVICE);
 	rc = -EACCES;
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+	lpfc_free_bsg_buffers(phba, cmp);
 
 send_mgmt_rsp_free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 	kfree(bmp);
 send_mgmt_rsp_exit:
 	/* make error code available to userspace */
@@ -1559,7 +1652,7 @@
 		scsi_block_requests(shost);
 	}
 
-	while (pring->txcmplq_cnt) {
+	while (!list_empty(&pring->txcmplq)) {
 		if (i++ > 500)  /* wait up to 5 seconds */
 			break;
 		msleep(10);
@@ -3193,13 +3286,7 @@
 	unsigned long flags;
 	uint8_t *pmb, *pmb_buf;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
-	/* job already timed out? */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
-	}
 
 	/*
 	 * The outgoing buffer is readily referred from the dma buffer,
@@ -3209,29 +3296,33 @@
 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
 	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
 
-	job = dd_data->context_un.mbox.set_job;
+	/* Determine if job has been aborted */
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Copy the mailbox data to the job if it is still active */
+
 	if (job) {
 		size = job->reply_payload.payload_len;
 		job->reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
 					    pmb_buf, size);
-		/* need to hold the lock until we set job->dd_data to NULL
-		 * to hold off the timeout handler returning to the mid-layer
-		 * while we are still processing the job.
-		 */
-		job->dd_data = NULL;
-		dd_data->context_un.mbox.set_job = NULL;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-	} else {
-		dd_data->context_un.mbox.set_job = NULL;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	}
 
+	dd_data->set_job = NULL;
 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
 	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
 	kfree(dd_data);
 
+	/* Complete the job if the job is still active */
+
 	if (job) {
 		job->reply->result = 0;
 		job->job_done(job);
@@ -3377,19 +3468,22 @@
 	struct lpfc_sli_config_mbox *sli_cfg_mbx;
 	uint8_t *pmbx;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
-	/* has the job already timed out? */
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		job = NULL;
-		goto job_done_out;
+
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
 	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
 	/*
 	 * The outgoing buffer is readily referred from the dma buffer,
 	 * just need to get header part from mailboxq structure.
 	 */
+
 	pmb = (uint8_t *)&pmboxq->u.mb;
 	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
 	/* Copy the byte swapped response mailbox back to the user */
@@ -3406,21 +3500,18 @@
 			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
 	}
 
-	job = dd_data->context_un.mbox.set_job;
+	/* Complete the job if the job is still active */
+
 	if (job) {
 		size = job->reply_payload.payload_len;
 		job->reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					    job->reply_payload.sg_cnt,
 					    pmb_buf, size);
+
 		/* result for successful */
 		job->reply->result = 0;
-		job->dd_data = NULL;
-		/* need to hold the lock util we set job->dd_data to NULL
-		 * to hold off the timeout handler from midlayer to take
-		 * any action.
-		 */
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
 		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 				"2937 SLI_CONFIG ext-buffer maibox command "
 				"(x%x/x%x) complete bsg job done, bsize:%d\n",
@@ -3431,20 +3522,18 @@
 					phba->mbox_ext_buf_ctx.mboxType,
 					dma_ebuf, sta_pos_addr,
 					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
-	} else
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-
-job_done_out:
-	if (!job)
+	} else {
 		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
 				"2938 SLI_CONFIG ext-buffer maibox "
 				"command (x%x/x%x) failure, rc:x%x\n",
 				phba->mbox_ext_buf_ctx.nembType,
 				phba->mbox_ext_buf_ctx.mboxType, rc);
+	}
+
 	/* state change */
 	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
 	kfree(dd_data);
-
 	return job;
 }
 
@@ -3461,8 +3550,10 @@
 {
 	struct fc_bsg_job *job;
 
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
 	/* handle the BSG job with mailbox command */
-	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+	if (!job)
 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3470,15 +3561,13 @@
 			"complete, ctxState:x%x, mbxStatus:x%x\n",
 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
 
-	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
-
 	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
 		lpfc_bsg_mbox_ext_session_reset(phba);
 
 	/* free base driver mailbox structure memory */
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 
-	/* complete the bsg job if we have it */
+	/* if the job is still active, call job done */
 	if (job)
 		job->job_done(job);
 
@@ -3498,8 +3587,10 @@
 {
 	struct fc_bsg_job *job;
 
+	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
+
 	/* handle the BSG job with the mailbox command */
-	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
+	if (!job)
 		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3507,13 +3598,11 @@
 			"complete, ctxState:x%x, mbxStatus:x%x\n",
 			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
 
-	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
-
 	/* free all memory, including dma buffers */
 	mempool_free(pmboxq, phba->mbox_mem_pool);
 	lpfc_bsg_mbox_ext_session_reset(phba);
 
-	/* complete the bsg job if we have it */
+	/* if the job is still active, call job done */
 	if (job)
 		job->job_done(job);
 
@@ -3759,9 +3848,9 @@
 	/* context fields to callback function */
 	pmboxq->context1 = dd_data;
 	dd_data->type = TYPE_MBOX;
+	dd_data->set_job = job;
 	dd_data->context_un.mbox.pmboxq = pmboxq;
 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
-	dd_data->context_un.mbox.set_job = job;
 	job->dd_data = dd_data;
 
 	/* state change */
@@ -3928,14 +4017,14 @@
 		/* context fields to callback function */
 		pmboxq->context1 = dd_data;
 		dd_data->type = TYPE_MBOX;
+		dd_data->set_job = job;
 		dd_data->context_un.mbox.pmboxq = pmboxq;
 		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
-		dd_data->context_un.mbox.set_job = job;
 		job->dd_data = dd_data;
 
 		/* state change */
-		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
 
+		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3951,6 +4040,7 @@
 	}
 
 	/* wait for additional external buffers */
+
 	job->reply->result = 0;
 	job->job_done(job);
 	return SLI_CONFIG_HANDLED;
@@ -4268,9 +4358,9 @@
 		/* context fields to callback function */
 		pmboxq->context1 = dd_data;
 		dd_data->type = TYPE_MBOX;
+		dd_data->set_job = job;
 		dd_data->context_un.mbox.pmboxq = pmboxq;
 		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
-		dd_data->context_un.mbox.set_job = job;
 		job->dd_data = dd_data;
 
 		/* state change */
@@ -4455,7 +4545,6 @@
 	uint8_t *from;
 	uint32_t size;
 
-
 	/* in case no data is transferred */
 	job->reply->reply_payload_rcv_len = 0;
 
@@ -4681,9 +4770,9 @@
 	/* setup context field to pass wait_queue pointer to wake function */
 	pmboxq->context1 = dd_data;
 	dd_data->type = TYPE_MBOX;
+	dd_data->set_job = job;
 	dd_data->context_un.mbox.pmboxq = pmboxq;
 	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
-	dd_data->context_un.mbox.set_job = job;
 	dd_data->context_un.mbox.ext = ext;
 	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
 	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
@@ -4797,75 +4886,79 @@
 	struct bsg_job_data *dd_data;
 	struct fc_bsg_job *job;
 	IOCB_t *rsp;
-	struct lpfc_dmabuf *bmp;
+	struct lpfc_dmabuf *bmp, *cmp, *rmp;
 	struct lpfc_bsg_menlo *menlo;
 	unsigned long flags;
 	struct menlo_response *menlo_resp;
+	unsigned int rsp_size;
 	int rc = 0;
 
-	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = cmdiocbq->context1;
-	if (!dd_data) {
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return;
-	}
-
+	cmp = cmdiocbq->context2;
+	bmp = cmdiocbq->context3;
 	menlo = &dd_data->context_un.menlo;
-	job = menlo->set_job;
-	job->dd_data = NULL; /* so timeout handler does not reply */
-
-	spin_lock(&phba->hbalock);
-	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
-	if (cmdiocbq->context2 && rspiocbq)
-		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
-		       &rspiocbq->iocb, sizeof(IOCB_t));
-	spin_unlock(&phba->hbalock);
-
-	bmp = menlo->bmp;
-	rspiocbq = menlo->rspiocbq;
+	rmp = menlo->rmp;
 	rsp = &rspiocbq->iocb;
 
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	/* Determine if job has been aborted */
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	job = dd_data->set_job;
+	if (job) {
+		/* Prevent timeout handling from trying to abort job  */
+		job->dd_data = NULL;
+	}
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 
-	/* always return the xri, this would be used in the case
-	 * of a menlo download to allow the data to be sent as a continuation
-	 * of the exchange.
-	 */
-	menlo_resp = (struct menlo_response *)
-		job->reply->reply_data.vendor_reply.vendor_rsp;
-	menlo_resp->xri = rsp->ulpContext;
-	if (rsp->ulpStatus) {
-		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
-			switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
-			case IOERR_SEQUENCE_TIMEOUT:
-				rc = -ETIMEDOUT;
-				break;
-			case IOERR_INVALID_RPI:
-				rc = -EFAULT;
-				break;
-			default:
+	/* Copy the job data or set the failing status for the job */
+
+	if (job) {
+		/* always return the xri, this would be used in the case
+		 * of a menlo download to allow the data to be sent as a
+		 * continuation of the exchange.
+		 */
+
+		menlo_resp = (struct menlo_response *)
+			job->reply->reply_data.vendor_reply.vendor_rsp;
+		menlo_resp->xri = rsp->ulpContext;
+		if (rsp->ulpStatus) {
+			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
+				case IOERR_SEQUENCE_TIMEOUT:
+					rc = -ETIMEDOUT;
+					break;
+				case IOERR_INVALID_RPI:
+					rc = -EFAULT;
+					break;
+				default:
+					rc = -EACCES;
+					break;
+				}
+			} else {
 				rc = -EACCES;
-				break;
 			}
-		} else
-			rc = -EACCES;
-	} else
-		job->reply->reply_payload_rcv_len =
-			rsp->un.genreq64.bdl.bdeSize;
+		} else {
+			rsp_size = rsp->un.genreq64.bdl.bdeSize;
+			job->reply->reply_payload_rcv_len =
+				lpfc_bsg_copy_data(rmp, &job->reply_payload,
+						   rsp_size, 0);
+		}
 
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-	lpfc_sli_release_iocbq(phba, rspiocbq);
+	}
+
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	lpfc_free_bsg_buffers(phba, cmp);
+	lpfc_free_bsg_buffers(phba, rmp);
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 	kfree(bmp);
 	kfree(dd_data);
-	/* make error code available to userspace */
-	job->reply->result = rc;
-	/* complete the job back to userspace */
-	job->job_done(job);
-	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	/* Complete the job if active */
+
+	if (job) {
+		job->reply->result = rc;
+		job->job_done(job);
+	}
+
 	return;
 }
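
Like the ELS and CT paths earlier in this patch, the menlo path now stages payloads in driver-owned DMA buffers and copies them to and from the bsg scatterlists (lpfc_bsg_copy_data above) instead of DMA-mapping the scatterlists themselves. A rough sketch of the two copy directions, assuming linear buffers req_virt/rsp_virt of at least the stated lengths (the function name is illustrative, not a driver API):

#include <linux/scatterlist.h>

static void example_stage_payloads(struct fc_bsg_job *job,
				   void *req_virt, size_t req_len,
				   void *rsp_virt, size_t rsp_len)
{
	/* request: bsg scatterlist -> driver DMA buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_virt, req_len);

	/* ... issue the command and wait for its completion ... */

	/* response: driver DMA buffer -> bsg scatterlist */
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    rsp_virt, rsp_len);
}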
 
@@ -4883,17 +4976,14 @@
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
-	IOCB_t *cmd, *rsp;
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *cmd;
 	int rc = 0;
 	struct menlo_command *menlo_cmd;
 	struct menlo_response *menlo_resp;
-	struct lpfc_dmabuf *bmp = NULL;
+	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
 	int request_nseg;
 	int reply_nseg;
-	struct scatterlist *sgel = NULL;
-	int numbde;
-	dma_addr_t busaddr;
 	struct bsg_job_data *dd_data;
 	struct ulp_bde64 *bpl = NULL;
 
@@ -4948,50 +5038,38 @@
 		goto free_dd;
 	}
 
-	cmdiocbq = lpfc_sli_get_iocbq(phba);
-	if (!cmdiocbq) {
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
 		rc = -ENOMEM;
 		goto free_bmp;
 	}
 
-	rspiocbq = lpfc_sli_get_iocbq(phba);
-	if (!rspiocbq) {
-		rc = -ENOMEM;
-		goto free_cmdiocbq;
-	}
-
-	rsp = &rspiocbq->iocb;
-
-	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
-	if (!bmp->virt) {
-		rc = -ENOMEM;
-		goto free_rspiocbq;
-	}
-
 	INIT_LIST_HEAD(&bmp->list);
-	bpl = (struct ulp_bde64 *) bmp->virt;
-	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
-				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+
+	bpl = (struct ulp_bde64 *)bmp->virt;
+	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
+	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
+				     1, bpl, &request_nseg);
+	if (!cmp) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+	lpfc_bsg_copy_data(cmp, &job->request_payload,
+			   job->request_payload.payload_len, 1);
+
+	bpl += request_nseg;
+	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
+	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
+				     bpl, &reply_nseg);
+	if (!rmp) {
+		rc = -ENOMEM;
+		goto free_cmp;
 	}
 
-	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
-				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
-		busaddr = sg_dma_address(sgel);
-		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
-		bpl->tus.f.bdeSize = sg_dma_len(sgel);
-		bpl->tus.w = cpu_to_le32(bpl->tus.w);
-		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
-		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
-		bpl++;
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_rmp;
 	}
 
 	cmd = &cmdiocbq->iocb;
@@ -5013,11 +5091,10 @@
 	cmdiocbq->vport = phba->pport;
 	/* We want the firmware to timeout before we do */
 	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
-	cmdiocbq->context3 = bmp;
-	cmdiocbq->context2 = rspiocbq;
 	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
 	cmdiocbq->context1 = dd_data;
-	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->context2 = cmp;
+	cmdiocbq->context3 = bmp;
 	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
 		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
 		cmd->ulpPU = MENLO_PU; /* 3 */
@@ -5031,29 +5108,25 @@
 	}
 
 	dd_data->type = TYPE_MENLO;
+	dd_data->set_job = job;
 	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
-	dd_data->context_un.menlo.rspiocbq = rspiocbq;
-	dd_data->context_un.menlo.set_job = job;
-	dd_data->context_un.menlo.bmp = bmp;
+	dd_data->context_un.menlo.rmp = rmp;
+	job->dd_data = dd_data;
 
 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
 		MENLO_TIMEOUT - 5);
 	if (rc == IOCB_SUCCESS)
 		return 0; /* done for now */
 
-	/* iocb failed so cleanup */
-	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
-		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
-	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
-		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
-
-	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
-
-free_rspiocbq:
-	lpfc_sli_release_iocbq(phba, rspiocbq);
-free_cmdiocbq:
 	lpfc_sli_release_iocbq(phba, cmdiocbq);
+
+free_rmp:
+	lpfc_free_bsg_buffers(phba, rmp);
+free_cmp:
+	lpfc_free_bsg_buffers(phba, cmp);
 free_bmp:
+	if (bmp->virt)
+		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 	kfree(bmp);
 free_dd:
 	kfree(dd_data);
@@ -5162,70 +5235,94 @@
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_iocbq *cmdiocb;
-	struct lpfc_bsg_event *evt;
-	struct lpfc_bsg_iocb *iocb;
-	struct lpfc_bsg_mbox *mbox;
-	struct lpfc_bsg_menlo *menlo;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct bsg_job_data *dd_data;
 	unsigned long flags;
+	int rc = 0;
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *check_iocb, *next_iocb;
+
+	/* If the job's driver data is NULL, the command has completed or is
+	 * in the process of completing.  In that case, return a status to the
+	 * request so the timeout is retried.  This avoids double-completion
+	 * issues, and the request will be pulled off the timer queue when the
+	 * command's completion handler executes.  Otherwise, prevent the
+	 * command's completion handler from executing the job done callback
+	 * and continue processing to abort the outstanding command.
+	 */
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = (struct bsg_job_data *)job->dd_data;
-	/* timeout and completion crossed paths if no dd_data */
-	if (!dd_data) {
+	if (dd_data) {
+		dd_data->set_job = NULL;
+		job->dd_data = NULL;
+	} else {
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		return 0;
+		return -EAGAIN;
 	}
 
 	switch (dd_data->type) {
 	case TYPE_IOCB:
-		iocb = &dd_data->context_un.iocb;
-		cmdiocb = iocb->cmdiocbq;
-		/* hint to completion handler that the job timed out */
-		job->reply->result = -EAGAIN;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		/* this will call our completion handler */
-		spin_lock_irq(&phba->hbalock);
-		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
-		spin_unlock_irq(&phba->hbalock);
-		break;
-	case TYPE_EVT:
-		evt = dd_data->context_un.evt;
-		/* this event has no job anymore */
-		evt->set_job = NULL;
-		job->dd_data = NULL;
-		job->reply->reply_payload_rcv_len = 0;
-		/* Return -EAGAIN which is our way of signallying the
-		 * app to retry.
+		/* Check to see if the IOCB was issued to the port or not. If
+		 * not, remove it from the txq and call cancel iocbs.
+		 * Otherwise, call abort iotag.
+		 */
-		job->reply->result = -EAGAIN;
+
+		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
+		spin_lock_irq(&phba->hbalock);
+		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+					 list) {
+			if (check_iocb == cmdiocb) {
+				list_move_tail(&check_iocb->list, &completions);
+				break;
+			}
+		}
+		if (list_empty(&completions))
+			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		job->job_done(job);
+		if (!list_empty(&completions)) {
+			lpfc_sli_cancel_iocbs(phba, &completions,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_ABORTED);
+		}
 		break;
-	case TYPE_MBOX:
-		mbox = &dd_data->context_un.mbox;
-		/* this mbox has no job anymore */
-		mbox->set_job = NULL;
-		job->dd_data = NULL;
-		job->reply->reply_payload_rcv_len = 0;
-		job->reply->result = -EAGAIN;
-		/* the mbox completion handler can now be run */
+
+	case TYPE_EVT:
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		job->job_done(job);
+		break;
+
+	case TYPE_MBOX:
+		/* Update the ext buf ctx state if needed */
+
 		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
 			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		break;
 	case TYPE_MENLO:
-		menlo = &dd_data->context_un.menlo;
-		cmdiocb = menlo->cmdiocbq;
-		/* hint to completion handler that the job timed out */
-		job->reply->result = -EAGAIN;
-		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
-		/* this will call our completion handler */
+		/* Check to see if the IOCB was issued to the port or not. If
+		 * not, remove it from the txq and call cancel iocbs.
+		 * Otherwise, call abort iotag.
+		 */
+
+		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
 		spin_lock_irq(&phba->hbalock);
-		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
+					 list) {
+			if (check_iocb == cmdiocb) {
+				list_move_tail(&check_iocb->list, &completions);
+				break;
+			}
+		}
+		if (list_empty(&completions))
+			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
 		spin_unlock_irq(&phba->hbalock);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		if (!list_empty(&completions)) {
+			lpfc_sli_cancel_iocbs(phba, &completions,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_ABORTED);
+		}
 		break;
 	default:
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
@@ -5236,5 +5333,5 @@
 	 * otherwise an error message will be displayed on the console
 	 * so always return success (zero)
 	 */
-	return 0;
+	return rc;
 }
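
To summarize the timeout handling just above: for both the IOCB and MENLO cases the handler now distinguishes a command still sitting on the txq (never issued to the port, so it can simply be pulled off and cancelled) from one already issued (which is aborted by iotag and left to its normal completion handler). A simplified sketch of that decision, using the same helpers the patch calls; the function name is invented:

static void example_timeout_abort(struct lpfc_hba *phba,
				  struct lpfc_sli_ring *pring,
				  struct lpfc_iocbq *cmdiocb)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next, &pring->txq, list) {
		if (iocb == cmdiocb) {
			/* never issued: just cancel it */
			list_move_tail(&iocb->list, &completions);
			break;
		}
	}
	if (list_empty(&completions))
		/* already issued: abort and let the completion run */
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
	spin_unlock_irq(&phba->hbalock);

	if (!list_empty(&completions))
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_ABORTED);
}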
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 76ca65d..7631893 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -106,6 +106,7 @@
 void lpfc_cleanup(struct lpfc_vport *);
 void lpfc_disc_timeout(unsigned long);
 
+int lpfc_unregister_fcf_prep(struct lpfc_hba *);
 struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
 struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
 void lpfc_worker_wake_up(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 08d156a..bbed847 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -484,6 +484,7 @@
 	vport->port_state = LPFC_FABRIC_CFG_LINK;
 	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
 	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
 	mboxq->vport = vport;
 	mboxq->context1 = dmabuf;
@@ -700,6 +701,20 @@
 		}
 	}
 
+	/*
+	 * For FC we need to do some special processing because of the SLI
+	 * Port's default settings of the Common Service Parameters.
+	 */
+	if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) {
+		/* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */
+		if ((phba->sli_rev == LPFC_SLI_REV4) && fabric_param_changed)
+			lpfc_unregister_fcf_prep(phba);
+
+		/* This should just update the VFI CSPs */
+		if (vport->fc_flag & FC_VFI_REGISTERED)
+			lpfc_issue_reg_vfi(vport);
+	}
+
 	if (fabric_param_changed &&
 		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
 
@@ -6225,7 +6240,7 @@
 		spin_unlock_irq(&phba->hbalock);
 	}
 
-	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
+	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
 		mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
 }
 
@@ -6279,7 +6294,6 @@
 			continue;
 
 		list_move_tail(&piocb->list, &completions);
-		pring->txq_cnt--;
 	}
 
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
@@ -6339,7 +6353,6 @@
 		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
 			continue;
 		list_move_tail(&piocb->list, &completions);
-		pring->txq_cnt--;
 	}
 	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
 		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
@@ -8065,7 +8078,7 @@
 				rxid, 1);
 
 			/* Check if TXQ queue needs to be serviced */
-			if (pring->txq_cnt)
+			if (!(list_empty(&pring->txq)))
 				lpfc_worker_wake_up(phba);
 			return;
 		}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bfda184..326e05a 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -691,12 +691,15 @@
 			/* Set the lpfc data pending flag */
 			set_bit(LPFC_DATA_READY, &phba->data_flags);
 		} else {
-			pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
-			lpfc_sli_handle_slow_ring_event(phba, pring,
-							(status &
-							 HA_RXMASK));
+			if (phba->link_state >= LPFC_LINK_UP) {
+				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
+				lpfc_sli_handle_slow_ring_event(phba, pring,
+								(status &
+								HA_RXMASK));
+			}
 		}
-		if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
+		if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		    (!list_empty(&pring->txq)))
 			lpfc_drain_txq(phba);
 		/*
 		 * Turn on Ring interrupts
@@ -1792,6 +1795,8 @@
 	virt_addr = mboxq->sge_array->addr[0];
 
 	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+	lpfc_sli_pcimem_bcopy(shdr, shdr,
+			      sizeof(union lpfc_sli4_cfg_shdr));
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status) {
@@ -2888,6 +2893,11 @@
 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 		goto out_free_mem;
 	}
+
+	/* If the VFI is already registered, there is nothing else to do */
+	if (vport->fc_flag & FC_VFI_REGISTERED)
+		goto out_free_mem;
+
 	/* The VPI is implicitly registered when the VFI is registered */
 	spin_lock_irq(shost->host_lock);
 	vport->vpi_state |= LPFC_VPI_REGISTERED;
@@ -2980,6 +2990,7 @@
 	struct lpfc_dmabuf *mp;
 	int rc;
 	struct fcf_record *fcf_record;
+	uint32_t fc_flags = 0;
 
 	spin_lock_irq(&phba->hbalock);
 	switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
@@ -3011,11 +3022,8 @@
 				"1309 Link Up Event npiv not supported in loop "
 				"topology\n");
 				/* Get Loop Map information */
-		if (bf_get(lpfc_mbx_read_top_il, la)) {
-			spin_lock(shost->host_lock);
-			vport->fc_flag |= FC_LBIT;
-			spin_unlock(shost->host_lock);
-		}
+		if (bf_get(lpfc_mbx_read_top_il, la))
+			fc_flags |= FC_LBIT;
 
 		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
 		i = la->lilpBde64.tus.f.bdeSize;
@@ -3064,12 +3072,16 @@
 				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
 		}
 		vport->fc_myDID = phba->fc_pref_DID;
-		spin_lock(shost->host_lock);
-		vport->fc_flag |= FC_LBIT;
-		spin_unlock(shost->host_lock);
+		fc_flags |= FC_LBIT;
 	}
 	spin_unlock_irq(&phba->hbalock);
 
+	if (fc_flags) {
+		spin_lock_irq(shost->host_lock);
+		vport->fc_flag |= fc_flags;
+		spin_unlock_irq(shost->host_lock);
+	}
+
 	lpfc_linkup(phba);
 	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!sparam_mbox)
@@ -3237,8 +3249,7 @@
 		vport->fc_flag &= ~FC_BYPASSED_MODE;
 	spin_unlock_irq(shost->host_lock);
 
-	if ((phba->fc_eventTag  < la->eventTag) ||
-	    (phba->fc_eventTag == la->eventTag)) {
+	if (phba->fc_eventTag <= la->eventTag) {
 		phba->fc_stat.LinkMultiEvent++;
 		if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
 			if (phba->fc_eventTag != 0)
@@ -3246,16 +3257,18 @@
 	}
 
 	phba->fc_eventTag = la->eventTag;
-	spin_lock_irq(&phba->hbalock);
-	if (bf_get(lpfc_mbx_read_top_mm, la))
-		phba->sli.sli_flag |= LPFC_MENLO_MAINT;
-	else
-		phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
-	spin_unlock_irq(&phba->hbalock);
+	if (phba->sli_rev < LPFC_SLI_REV4) {
+		spin_lock_irq(&phba->hbalock);
+		if (bf_get(lpfc_mbx_read_top_mm, la))
+			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
+		else
+			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
+		spin_unlock_irq(&phba->hbalock);
+	}
 
 	phba->link_events++;
 	if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
-	    (!bf_get(lpfc_mbx_read_top_mm, la))) {
+	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
 		phba->fc_stat.LinkUp++;
 		if (phba->link_flag & LS_LOOPBACK_MODE) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3300,8 +3313,8 @@
 				bf_get(lpfc_mbx_read_top_fa, la));
 		lpfc_mbx_issue_link_down(phba);
 	}
-	if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
-	    (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
+	if ((phba->sli.sli_flag & LPFC_MENLO_MAINT) &&
+	    ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP))) {
 		if (phba->link_state != LPFC_LINK_DOWN) {
 			phba->fc_stat.LinkDown++;
 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
@@ -3329,8 +3342,9 @@
 		}
 	}
 
-	if (bf_get(lpfc_mbx_read_top_fa, la)) {
-		if (bf_get(lpfc_mbx_read_top_mm, la))
+	if ((phba->sli_rev < LPFC_SLI_REV4) &&
+	    bf_get(lpfc_mbx_read_top_fa, la)) {
+		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
 			lpfc_issue_clear_la(phba, vport);
 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
 				"1311 fa %d\n",
@@ -4354,7 +4368,6 @@
 					   with an error */
 					list_move_tail(&iocb->list,
 						       &completions);
-					pring->txq_cnt--;
 				}
 			}
 			spin_unlock_irq(&phba->hbalock);
@@ -5055,7 +5068,6 @@
 		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
 
 			list_move_tail(&iocb->list, &completions);
-			pring->txq_cnt--;
 		}
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 6e93b88..1dd2f6f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1958,6 +1958,9 @@
 
 struct lpfc_mbx_reg_vfi {
 	uint32_t word1;
+#define lpfc_reg_vfi_upd_SHIFT		29
+#define lpfc_reg_vfi_upd_MASK		0x00000001
+#define lpfc_reg_vfi_upd_WORD		word1
 #define lpfc_reg_vfi_vp_SHIFT		28
 #define lpfc_reg_vfi_vp_MASK		0x00000001
 #define lpfc_reg_vfi_vp_WORD		word1
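
The new lpfc_reg_vfi_upd definitions follow the driver's usual SHIFT/MASK/WORD triplet, which the bf_set()/bf_get() accessors turn into a read-modify-write of the named word. Roughly, and only as an approximation (this is not the macro text from lpfc_hw4.h):

#define EX_UPD_SHIFT	29
#define EX_UPD_MASK	0x00000001

static inline void ex_set_upd(uint32_t *word1, uint32_t val)
{
	/* clear the field, then write the new value into bit 29 */
	*word1 = (*word1 & ~(EX_UPD_MASK << EX_UPD_SHIFT)) |
		 ((val & EX_UPD_MASK) << EX_UPD_SHIFT);
}

static inline uint32_t ex_get_upd(uint32_t word1)
{
	return (word1 >> EX_UPD_SHIFT) & EX_UPD_MASK;
}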
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 314b4f6..5da2972 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -839,7 +839,6 @@
 		 * way, nothing should be on txcmplq as it will NEVER complete.
 		 */
 		list_splice_init(&pring->txcmplq, &completions);
-		pring->txcmplq_cnt = 0;
 		spin_unlock_irq(&phba->hbalock);
 
 		/* Cancel all the IOCBs from the completions list */
@@ -2915,9 +2914,9 @@
 			sglq_entry->state = SGL_FREED;
 			list_add_tail(&sglq_entry->list, &els_sgl_list);
 		}
-		spin_lock(&phba->hbalock);
+		spin_lock_irq(&phba->hbalock);
 		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irq(&phba->hbalock);
 	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 		/* els xri-sgl shrinked */
 		xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
@@ -3015,9 +3014,9 @@
 		psb->cur_iocbq.sli4_lxritag = lxri;
 		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
-	spin_lock(&phba->scsi_buf_list_lock);
+	spin_lock_irq(&phba->scsi_buf_list_lock);
 	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list);
-	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_lock);
 
 	return 0;
 
@@ -4004,6 +4003,52 @@
 }
 
 /**
+ * lpfc_sli4_perform_inuse_fcf_recovery - Perform in-use FCF recovery
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_fip: pointer to the async FIP completion queue entry.
+ *
+ * This routine performs FCF recovery when the in-use FCF is either dead or
+ * has been modified.
+ **/
+static void
+lpfc_sli4_perform_inuse_fcf_recovery(struct lpfc_hba *phba,
+				     struct lpfc_acqe_fip *acqe_fip)
+{
+	int rc;
+
+	spin_lock_irq(&phba->hbalock);
+	/* Mark the fast failover process in progress */
+	phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2771 Start FCF fast failover process due to in-use "
+			"FCF DEAD/MODIFIED event: evt_tag:x%x, index:x%x\n",
+			acqe_fip->event_tag, acqe_fip->index);
+	rc = lpfc_sli4_redisc_fcf_table(phba);
+	if (rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2772 Issue FCF rediscover mailbox command "
+				"failed, fail through to FCF dead event\n");
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		/*
+		 * Last resort will fail over by treating this as a link
+		 * down to FCF registration.
+		 */
+		lpfc_sli4_fcf_dead_failthrough(phba);
+	} else {
+		/* Reset FCF roundrobin bmask for new discovery */
+		lpfc_sli4_clear_fcf_rr_bmask(phba);
+		/*
+		 * Handling fast FCF failover to a DEAD FCF event is
+		 * considered equalivant to receiving CVL to all vports.
+		 * considered equivalent to receiving a CVL on all vports.
+		lpfc_sli4_perform_all_vport_cvl(phba);
+	}
+}
+
+/**
  * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -4068,9 +4113,22 @@
 			break;
 		}
 
-		/* If the FCF has been in discovered state, do nothing. */
-		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
+		/* If the FCF has been in discovered state, perform rediscovery
+		 * only if the FCF with the same index as the in-use FCF was
+		 * modified during normal operation. Otherwise, do nothing.
+		 */
+		if (phba->pport->port_state > LPFC_FLOGI) {
 			spin_unlock_irq(&phba->hbalock);
+			if (phba->fcf.current_rec.fcf_indx ==
+			    acqe_fip->index) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+						"3300 In-use FCF (%d) "
+						"modified, perform FCF "
+						"rediscovery\n",
+						acqe_fip->index);
+				lpfc_sli4_perform_inuse_fcf_recovery(phba,
+								     acqe_fip);
+			}
 			break;
 		}
 		spin_unlock_irq(&phba->hbalock);
@@ -4123,39 +4181,7 @@
 		 * is no longer valid as we are not in the middle of FCF
 		 * failover process already.
 		 */
-		spin_lock_irq(&phba->hbalock);
-		/* Mark the fast failover process in progress */
-		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
-		spin_unlock_irq(&phba->hbalock);
-
-		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-				"2771 Start FCF fast failover process due to "
-				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
-				"\n", acqe_fip->event_tag, acqe_fip->index);
-		rc = lpfc_sli4_redisc_fcf_table(phba);
-		if (rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
-					LOG_DISCOVERY,
-					"2772 Issue FCF rediscover mabilbox "
-					"command failed, fail through to FCF "
-					"dead event\n");
-			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
-			spin_unlock_irq(&phba->hbalock);
-			/*
-			 * Last resort will fail over by treating this
-			 * as a link down to FCF registration.
-			 */
-			lpfc_sli4_fcf_dead_failthrough(phba);
-		} else {
-			/* Reset FCF roundrobin bmask for new discovery */
-			lpfc_sli4_clear_fcf_rr_bmask(phba);
-			/*
-			 * Handling fast FCF failover to a DEAD FCF event is
-			 * considered equalivant to receiving CVL to all vports.
-			 */
-			lpfc_sli4_perform_all_vport_cvl(phba);
-		}
+		lpfc_sli4_perform_inuse_fcf_recovery(phba, acqe_fip);
 		break;
 	case LPFC_FIP_EVENT_TYPE_CVL:
 		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index efc9cd9..a7a9fa4 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2126,32 +2126,40 @@
 lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
 {
 	struct lpfc_mbx_reg_vfi *reg_vfi;
+	struct lpfc_hba *phba = vport->phba;
 
 	memset(mbox, 0, sizeof(*mbox));
 	reg_vfi = &mbox->u.mqe.un.reg_vfi;
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
 	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
 	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
-	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
-	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
-	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
+	       phba->sli4_hba.vfi_ids[vport->vfi]);
+	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, phba->fcf.fcfi);
+	bf_set(lpfc_reg_vfi_vpi, reg_vfi, phba->vpi_ids[vport->vpi]);
 	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
 	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
 	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
-	reg_vfi->e_d_tov = vport->phba->fc_edtov;
-	reg_vfi->r_a_tov = vport->phba->fc_ratov;
+	reg_vfi->e_d_tov = phba->fc_edtov;
+	reg_vfi->r_a_tov = phba->fc_ratov;
 	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
 	reg_vfi->bde.addrLow = putPaddrLow(phys);
 	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
 	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+
+	/* Only FC supports upd bit */
+	if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) &&
+	    (vport->fc_flag & FC_VFI_REGISTERED)) {
+		bf_set(lpfc_reg_vfi_vp, reg_vfi, 0);
+		bf_set(lpfc_reg_vfi_upd, reg_vfi, 1);
+	}
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
 			"3134 Register VFI, mydid:x%x, fcfi:%d, "
 			" vfi:%d, vpi:%d, fc_pname:%x%x\n",
 			vport->fc_myDID,
-			vport->phba->fcf.fcfi,
-			vport->phba->sli4_hba.vfi_ids[vport->vfi],
-			vport->phba->vpi_ids[vport->vpi],
+			phba->fcf.fcfi,
+			phba->sli4_hba.vfi_ids[vport->vfi],
+			phba->vpi_ids[vport->vpi],
 			reg_vfi->wwn[0], reg_vfi->wwn[1]);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 46128c6..82f4d35 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -226,7 +226,6 @@
 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) {
 			/* It matches, so deque and call compl with anp error */
 			list_move_tail(&iocb->list, &completions);
-			pring->txq_cnt--;
 		}
 	}
 
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 98af07c..74b8710 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -732,7 +732,7 @@
 		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
 		psb->exch_busy = 0;
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
-		if (pring->txq_cnt)
+		if (!list_empty(&pring->txq))
 			lpfc_worker_wake_up(phba);
 		return;
 
@@ -885,9 +885,9 @@
 	int num_posted, rc = 0;
 
 	/* get all SCSI buffers need to repost to a local list */
-	spin_lock(&phba->scsi_buf_list_lock);
+	spin_lock_irq(&phba->scsi_buf_list_lock);
 	list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist);
-	spin_unlock(&phba->scsi_buf_list_lock);
+	spin_unlock_irq(&phba->scsi_buf_list_lock);
 
 	/* post the list of scsi buffer sgls to port if available */
 	if (!list_empty(&post_sblist)) {
@@ -4246,7 +4246,7 @@
 	unsigned long  poll_tmo_expires =
 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
 
-	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
+	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
 		mod_timer(&phba->fcp_poll_timer,
 			  poll_tmo_expires);
 }
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d43faf3..35dd17e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -873,14 +873,16 @@
 				xritag, rxid, ndlp->nlp_DID, send_rrq);
 		return -EINVAL;
 	}
-	rrq->send_rrq = send_rrq;
+	if (phba->cfg_enable_rrq == 1)
+		rrq->send_rrq = send_rrq;
+	else
+		rrq->send_rrq = 0;
 	rrq->xritag = xritag;
 	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
 	rrq->ndlp = ndlp;
 	rrq->nlp_DID = ndlp->nlp_DID;
 	rrq->vport = ndlp->vport;
 	rrq->rxid = rxid;
-	rrq->send_rrq = send_rrq;
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	empty = list_empty(&phba->active_rrq_list);
 	list_add_tail(&rrq->list, &phba->active_rrq_list);
@@ -1009,6 +1011,18 @@
 	else
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
 
+	/*
+	** This should have been removed from the txcmplq before calling
+	** iocbq_release.  The normal completion path should have already
+	** done the list_del_init.
+	*/
+	if (unlikely(!list_empty(&iocbq->list))) {
+		if (iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)
+			iocbq->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
+		list_del_init(&iocbq->list);
+	}
+
 	if (sglq)  {
 		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
 			(sglq->state != SGL_XRI_ABORTED)) {
@@ -1025,7 +1039,7 @@
 				&phba->sli4_hba.lpfc_sgl_list);
 
 			/* Check if TXQ queue needs to be serviced */
-			if (pring->txq_cnt)
+			if (!list_empty(&pring->txq))
 				lpfc_worker_wake_up(phba);
 		}
 	}
@@ -1057,6 +1071,14 @@
 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
 
 	/*
+	** This should have been removed from the txcmplq before calling
+	** iocbq_release.  The normal completion path should have already
+	** done the list_del_init.
+	*/
+	if (unlikely(!list_empty(&iocbq->list)))
+		list_del_init(&iocbq->list);
+
+	/*
 	 * Clean all volatile data fields, preserve iotag and node struct.
 	 */
 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
@@ -1122,7 +1144,6 @@
 
 	while (!list_empty(iocblist)) {
 		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
-
 		if (!piocb->iocb_cmpl)
 			lpfc_sli_release_iocbq(phba, piocb);
 		else {
@@ -1310,9 +1331,6 @@
 {
 	list_add_tail(&piocb->list, &pring->txcmplq);
 	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
-	pring->txcmplq_cnt++;
-	if (pring->txcmplq_cnt > pring->txcmplq_max)
-		pring->txcmplq_max = pring->txcmplq_cnt;
 
 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -1344,8 +1362,6 @@
 	struct lpfc_iocbq *cmd_iocb;
 
 	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
-	if (cmd_iocb != NULL)
-		pring->txq_cnt--;
 	return cmd_iocb;
 }
 
@@ -1614,8 +1630,9 @@
 	 *  (c) link attention events can be processed (fcp ring only)
 	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
 	 */
-	if (pring->txq_cnt &&
-	    lpfc_is_link_up(phba) &&
+
+	if (lpfc_is_link_up(phba) &&
+	    (!list_empty(&pring->txq)) &&
 	    (pring->ringno != phba->sli.fcp_ring ||
 	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
 
@@ -2612,7 +2629,6 @@
 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 		list_del_init(&cmd_iocb->list);
 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
-			pring->txcmplq_cnt--;
 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
 		}
 		return cmd_iocb;
@@ -2650,7 +2666,6 @@
 			/* remove from txcmpl queue list */
 			list_del_init(&cmd_iocb->list);
 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
-			pring->txcmplq_cnt--;
 			return cmd_iocb;
 		}
 	}
@@ -3499,7 +3514,6 @@
 	 */
 	spin_lock_irq(&phba->hbalock);
 	list_splice_init(&pring->txq, &completions);
-	pring->txq_cnt = 0;
 
 	/* Next issue ABTS for everything on the txcmplq */
 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
@@ -3536,11 +3550,9 @@
 	spin_lock_irq(&phba->hbalock);
 	/* Retrieve everything on txq */
 	list_splice_init(&pring->txq, &txq);
-	pring->txq_cnt = 0;
 
 	/* Retrieve everything on the txcmplq */
 	list_splice_init(&pring->txcmplq, &txcmplq);
-	pring->txcmplq_cnt = 0;
 
 	/* Indicate the I/O queues are flushed */
 	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
@@ -5988,9 +6000,9 @@
 	LIST_HEAD(post_sgl_list);
 	LIST_HEAD(free_sgl_list);
 
-	spin_lock(&phba->hbalock);
+	spin_lock_irq(&phba->hbalock);
 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
-	spin_unlock(&phba->hbalock);
+	spin_unlock_irq(&phba->hbalock);
 
 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
 				 &allc_sgl_list, list) {
@@ -6091,10 +6103,10 @@
 
 	/* push els sgls posted to the available list */
 	if (!list_empty(&post_sgl_list)) {
-		spin_lock(&phba->hbalock);
+		spin_lock_irq(&phba->hbalock);
 		list_splice_init(&post_sgl_list,
 				 &phba->sli4_hba.lpfc_sgl_list);
-		spin_unlock(&phba->hbalock);
+		spin_unlock_irq(&phba->hbalock);
 	} else {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"3161 Failure to post els sgl to port.\n");
@@ -7615,7 +7627,6 @@
 {
 	/* Insert the caller's iocb in the txq tail for later processing. */
 	list_add_tail(&piocb->list, &pring->txq);
-	pring->txq_cnt++;
 }
 
 /**
@@ -8387,7 +8398,7 @@
 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			sglq = NULL;
 		else {
-			if (pring->txq_cnt) {
+			if (!list_empty(&pring->txq)) {
 				if (!(flag & SLI_IOCB_RET_IOCB)) {
 					__lpfc_sli_ringtx_put(phba,
 						pring, piocb);
@@ -9055,7 +9066,6 @@
 			if (iocb->vport != vport)
 				continue;
 			list_move_tail(&iocb->list, &completions);
-			pring->txq_cnt--;
 		}
 
 		/* Next issue ABTS for everything on the txcmplq */
@@ -9124,8 +9134,6 @@
 		 * given to the FW yet.
 		 */
 		list_splice_init(&pring->txq, &completions);
-		pring->txq_cnt = 0;
-
 	}
 	spin_unlock_irqrestore(&phba->hbalock, flags);
 
@@ -9966,6 +9974,9 @@
 	long timeleft, timeout_req = 0;
 	int retval = IOCB_SUCCESS;
 	uint32_t creg_val;
+	struct lpfc_iocbq *iocb;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	/*
 	 * If the caller has provided a response iocbq buffer, then context2
@@ -10013,9 +10024,17 @@
 			retval = IOCB_TIMEDOUT;
 		}
 	} else if (retval == IOCB_BUSY) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
-			phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
+		if (phba->cfg_log_verbose & LOG_SLI) {
+			list_for_each_entry(iocb, &pring->txq, list) {
+				txq_cnt++;
+			}
+			list_for_each_entry(iocb, &pring->txcmplq, list) {
+				txcmplq_cnt++;
+			}
+			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
+				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
+		}
 		return retval;
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -11298,16 +11317,25 @@
 	struct lpfc_iocbq *irspiocbq;
 	unsigned long iflags;
 	struct lpfc_sli_ring *pring = cq->pring;
+	int txq_cnt = 0;
+	int txcmplq_cnt = 0;
+	int fcp_txcmplq_cnt = 0;
 
 	/* Get an irspiocbq for later ELS response processing use */
 	irspiocbq = lpfc_sli_get_iocbq(phba);
 	if (!irspiocbq) {
+		if (!list_empty(&pring->txq))
+			txq_cnt++;
+		if (!list_empty(&pring->txcmplq))
+			txcmplq_cnt++;
+		if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+			fcp_txcmplq_cnt++;
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
 			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
-			pring->txq_cnt, phba->iocb_cnt,
-			phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
-			phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
+			txq_cnt, phba->iocb_cnt,
+			fcp_txcmplq_cnt,
+			txcmplq_cnt);
 		return false;
 	}
 
@@ -15482,11 +15510,18 @@
 			LPFC_SLI4_FCF_TBL_INDX_MAX);
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
 			"3060 Last IDX %d\n", last_index);
-	if (list_empty(&phba->fcf.fcf_pri_list)) {
+
+	/* Verify the priority list has 2 or more entries */
+	spin_lock_irq(&phba->hbalock);
+	if (list_empty(&phba->fcf.fcf_pri_list) ||
+	    list_is_singular(&phba->fcf.fcf_pri_list)) {
+		spin_unlock_irq(&phba->hbalock);
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 			"3061 Last IDX %d\n", last_index);
 		return 0; /* Empty rr list */
 	}
+	spin_unlock_irq(&phba->hbalock);
+
 	next_fcf_pri = 0;
 	/*
 	 * Clear the rr_bmask and set all of the bits that are at this
@@ -16245,14 +16280,19 @@
 	char *fail_msg = NULL;
 	struct lpfc_sglq *sglq;
 	union lpfc_wqe wqe;
+	int txq_cnt = 0;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	if (pring->txq_cnt > pring->txq_max)
-		pring->txq_max = pring->txq_cnt;
+	list_for_each_entry(piocbq, &pring->txq, list) {
+		txq_cnt++;
+	}
+
+	if (txq_cnt > pring->txq_max)
+		pring->txq_max = txq_cnt;
 
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
 
-	while (pring->txq_cnt) {
+	while (!list_empty(&pring->txq)) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
 
 		piocbq = lpfc_sli_ringtx_get(phba, pring);
@@ -16260,7 +16300,7 @@
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
 				"2823 txq empty and txq_cnt is %d\n ",
-				pring->txq_cnt);
+				txq_cnt);
 			break;
 		}
 		sglq = __lpfc_sli_get_sglq(phba, piocbq);
@@ -16269,6 +16309,7 @@
 			spin_unlock_irqrestore(&phba->hbalock, iflags);
 			break;
 		}
+		txq_cnt--;
 
 		/* The xri and iocb resources secured,
 		 * attempt to issue request
@@ -16300,5 +16341,5 @@
 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 				IOERR_SLI_ABORTED);
 
-	return pring->txq_cnt;
+	return txq_cnt;
 }
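
The lpfc hunks above all make the same substitution: the cached pring->txq_cnt and txcmplq_cnt counters go away, list_empty() answers the common "is anything queued?" question, and the few paths that still want a number (the "2818" and "0387" log messages, the txq_max bookkeeping) walk the list on demand. A minimal sketch of the on-demand count, assuming kernel context and the lpfc structures used above (the helper name is illustrative, not part of the patch):

/* Count pring->txq on demand; the caller is assumed to hold the lock
 * (hbalock) that protects the list, as the hunks above do.
 */
static int lpfc_sli_txq_depth(struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocbq;
	int cnt = 0;

	list_for_each_entry(iocbq, &pring->txq, list)
		cnt++;

	return cnt;
}

Because the counter no longer exists, none of the splice/add/move sites have to keep it in sync, which is what most of the deletions above amount to.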
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f3b7795..664cd04 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.37"
+#define LPFC_DRIVER_VERSION "8.3.38"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index dce7d78..c37b244 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-        qla_nx.o qla_target.o
+        qla_nx.o qla_mr.o qla_target.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b3db9dc..bf60c63 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -888,7 +888,10 @@
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t sn;
 
-	if (IS_FWI2_CAPABLE(ha)) {
+	if (IS_QLAFX00(vha->hw)) {
+		return snprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->mr.serial_num);
+	} else if (IS_FWI2_CAPABLE(ha)) {
 		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
 		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
 	}
@@ -912,6 +915,11 @@
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLAFX00(vha->hw))
+		return snprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->mr.hw_version);
+
 	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
 	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
 	    ha->product_id[3]);
@@ -922,6 +930,11 @@
 			char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+	if (IS_QLAFX00(vha->hw))
+		return snprintf(buf, PAGE_SIZE, "%s\n",
+		    vha->hw->mr.product_name);
+
 	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
 }
 
@@ -1304,6 +1317,12 @@
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	int rval = QLA_FUNCTION_FAILED;
 	uint16_t state[5];
+	uint32_t pstate;
+
+	if (IS_QLAFX00(vha->hw)) {
+		pstate = qlafx00_fw_state_show(dev, attr, buf);
+		return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
+	}
 
 	if (qla2x00_reset_active(vha))
 		ql_log(ql_log_warn, vha, 0x707c,
@@ -1454,6 +1473,11 @@
 					(shost_priv(shost)))->hw;
 	u32 speed = FC_PORTSPEED_UNKNOWN;
 
+	if (IS_QLAFX00(ha)) {
+		qlafx00_get_host_speed(shost);
+		return;
+	}
+
 	switch (ha->link_data_rate) {
 	case PORT_SPEED_1GB:
 		speed = FC_PORTSPEED_1GBIT;
@@ -1637,6 +1661,9 @@
 {
 	scsi_qla_host_t *vha = shost_priv(shost);
 
+	if (IS_QLAFX00(vha->hw))
+		return 0;
+
 	qla2x00_loop_reset(vha);
 	return 0;
 }
@@ -1655,6 +1682,9 @@
 	pfc_host_stat = &vha->fc_host_stat;
 	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
 
+	if (IS_QLAFX00(vha->hw))
+		goto done;
+
 	if (test_bit(UNLOADING, &vha->dpc_flags))
 		goto done;
 
@@ -2087,6 +2117,9 @@
 		    FC_PORTSPEED_1GBIT;
 	else if (IS_QLA23XX(ha))
 		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+	else if (IS_QLAFX00(ha))
+		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
+		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
 	else
 		speed = FC_PORTSPEED_1GBIT;
 	fc_host_supported_speeds(vha->host) = speed;
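
The qla_attr.c changes repeat one small shape: each sysfs show handler tests IS_QLAFX00() first and reports the value cached in vha->hw->mr, and only otherwise runs the legacy VPD/register path. Condensed to its essentials (hypothetical function name, fields as added by this series):

static ssize_t
qlaxxx_model_show_sketch(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (IS_QLAFX00(vha->hw))	/* ISPFx00: report the cached MR data */
		return snprintf(buf, PAGE_SIZE, "%s\n",
		    vha->hw->mr.product_name);

	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}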
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index ad54099..39719f8 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -30,14 +30,31 @@
 	struct scsi_qla_host *vha = sp->fcport->vha;
 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	struct qla_hw_data *ha = vha->hw;
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
 
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
-	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	if (sp->type == SRB_FXIOCB_BCMD) {
+		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+		    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
 
-	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
-	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+			dma_unmap_sg(&ha->pdev->dev,
+			    bsg_job->request_payload.sg_list,
+			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+			dma_unmap_sg(&ha->pdev->dev,
+			    bsg_job->reply_payload.sg_list,
+			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	} else {
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	}
 
 	if (sp->type == SRB_CT_CMD ||
+	    sp->type == SRB_FXIOCB_BCMD ||
 	    sp->type == SRB_ELS_CMD_HST)
 		kfree(sp->fcport);
 	qla2x00_rel_sp(vha, sp);
@@ -751,6 +768,8 @@
 	elreq.transfer_size = req_data_len;
 
 	elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	elreq.iteration_count =
+	    bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
 
 	if (atomic_read(&vha->loop_state) == LOOP_READY &&
 	    (ha->current_topology == ISP_CFG_F ||
@@ -1883,6 +1902,128 @@
 }
 
 static int
+qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = bsg_job->shost;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = (DRIVER_ERROR << 16);
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+	srb_t *sp;
+	int req_sg_cnt = 0, rsp_sg_cnt = 0;
+	struct fc_port *fcport;
+	char  *type = "FC_BSG_HST_FX_MGMT";
+
+	/* Copy the IOCB specific information */
+	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+	    &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
+	/* Dump the vendor information */
+	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
+	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
+
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x70d0,
+		    "Host is not online.\n");
+		rval = -EIO;
+		goto done;
+	}
+
+	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		    bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+		if (!req_sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x70c7,
+			    "dma_map_sg return %d for request\n", req_sg_cnt);
+			rval = -ENOMEM;
+			goto done;
+		}
+	}
+
+	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
+		    bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+		if (!rsp_sg_cnt) {
+			ql_log(ql_log_warn, vha, 0x70c8,
+			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
+			rval = -ENOMEM;
+			goto done_unmap_req_sg;
+		}
+	}
+
+	ql_dbg(ql_dbg_user, vha, 0x70c9,
+	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
+	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
+	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
+
+	/* Allocate a dummy fcport structure, since the functions preparing
+	 * the IOCB and mailbox command retrieve port specific information
+	 * from the fcport structure. For host-based ELS commands there is
+	 * no fcport structure allocated.
+	 */
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		ql_log(ql_log_warn, vha, 0x70ca,
+		    "Failed to allocate fcport.\n");
+		rval = -ENOMEM;
+		goto done_unmap_rsp_sg;
+	}
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_warn, vha, 0x70cb,
+		    "qla2x00_get_sp failed.\n");
+		rval = -ENOMEM;
+		goto done_free_fcport;
+	}
+
+	/* Initialize all required fields of fcport */
+	fcport->vha = vha;
+	fcport->loop_id = piocb_rqst->dataword;
+
+	sp->type = SRB_FXIOCB_BCMD;
+	sp->name = "bsg_fx_mgmt";
+	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
+	sp->u.bsg_job = bsg_job;
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
+
+	ql_dbg(ql_dbg_user, vha, 0x70cc,
+	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
+	    type, piocb_rqst->func_type, fcport->loop_id);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x70cd,
+		    "qla2x00_start_sp failed=%d.\n", rval);
+		mempool_free(sp, ha->srb_mempool);
+		rval = -EIO;
+		goto done_free_fcport;
+	}
+	return rval;
+
+done_free_fcport:
+	kfree(fcport);
+
+done_unmap_rsp_sg:
+	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
+		dma_unmap_sg(&ha->pdev->dev,
+		    bsg_job->reply_payload.sg_list,
+		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+done_unmap_req_sg:
+	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
+		dma_unmap_sg(&ha->pdev->dev,
+		    bsg_job->request_payload.sg_list,
+		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+done:
+	return rval;
+}
+
+static int
 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 {
 	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1928,6 +2069,8 @@
 	case QL_VND_DIAG_IO_CMD:
 		return qla24xx_process_bidir_cmd(bsg_job);
 
+	case QL_VND_FX00_MGMT_CMD:
+		return qlafx00_mgmt_cmd(bsg_job);
 	default:
 		return -ENOSYS;
 	}
@@ -2007,7 +2150,8 @@
 			sp = req->outstanding_cmds[cnt];
 			if (sp) {
 				if (((sp->type == SRB_CT_CMD) ||
-					(sp->type == SRB_ELS_CMD_HST))
+					(sp->type == SRB_ELS_CMD_HST) ||
+					(sp->type == SRB_FXIOCB_BCMD))
 					&& (sp->u.bsg_job == bsg_job)) {
 					spin_unlock_irqrestore(&ha->hardware_lock, flags);
 					if (ha->isp_ops->abort_command(sp)) {
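
Worth noting in the qla_bsg.c changes: for SRB_FXIOCB_BCMD the request and reply scatterlists are only DMA-mapped when the matching SRB_FXDISC_REQ_DMA_VALID / SRB_FXDISC_RESP_DMA_VALID flag is set in the FX00 IOCB request, so the completion path must gate its dma_unmap_sg() calls on the same flags, as qla2x00_bsg_sp_free() now does. A minimal sketch of that symmetric cleanup (illustrative helper name, kernel context assumed):

static void qlafx00_bsg_unmap_sketch(struct qla_hw_data *ha,
	struct fc_bsg_job *bsg_job, struct qla_mt_iocb_rqst_fx00 *piocb_rqst)
{
	/* Unmap only what qlafx00_mgmt_cmd() actually mapped. */
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
}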
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index e9f6b9b..04f7703 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -22,6 +22,7 @@
 #define QL_VND_DIAG_IO_CMD	0x0A
 #define QL_VND_WRITE_I2C	0x10
 #define QL_VND_READ_I2C		0x11
+#define QL_VND_FX00_MGMT_CMD	0x12
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK			0
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index fbc305f..cfa2a20 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,28 +11,31 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x0126       | 0x4b,0xba,0xfa |
- * | Mailbox commands             |       0x115b       | 0x111a-0x111b  |
- * |                              |                    | 0x112c-0x112e  |
- * |                              |                    | 0x113a         |
+ * | Module Init and Probe        |       0x014f       | 0x4b,0xba,0xfa |
+ * | Mailbox commands             |       0x1179       | 0x111a-0x111b  |
  * |                              |                    | 0x1155-0x1158  |
- * | Device Discovery             |       0x2087       | 0x2020-0x2022, |
+ * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3031       | 0x3006-0x300b  |
+ * | Queue Command and IO tracing |       0x3058       | 0x3006-0x300b  |
  * |                              |                    | 0x3027-0x3028  |
- * |                              |                    | 0x302d-0x302e  |
- * | DPC Thread                   |       0x401d       | 0x4002,0x4013  |
- * | Async Events                 |       0x5071       | 0x502b-0x502f  |
+ * |                              |                    | 0x303d-0x3041  |
+ * |                              |                    | 0x302d,0x3033  |
+ * |                              |                    | 0x3036,0x3038  |
+ * |                              |                    | 0x303a         |
+ * | DPC Thread                   |       0x4022       | 0x4002,0x4013  |
+ * | Async Events                 |       0x5081       | 0x502b-0x502f  |
  * |                              |                    | 0x5047,0x5052  |
+ * |                              |                    | 0x5040,0x5075  |
  * | Timer Routines               |       0x6011       |                |
- * | User Space Interactions      |       0x70c4       | 0x7018,0x702e, |
+ * | User Space Interactions      |       0x70dd       | 0x7018,0x702e, |
  * |                              |                    | 0x7020,0x7024, |
  * |                              |                    | 0x7039,0x7045, |
  * |                              |                    | 0x7073-0x7075, |
- * |                              |                    | 0x708c,        |
+ * |                              |                    | 0x707b,0x708c, |
  * |                              |                    | 0x70a5,0x70a6, |
  * |                              |                    | 0x70a8,0x70ab, |
- * |                              |                    | 0x70ad-0x70ae  |
+ * |                              |                    | 0x70ad-0x70ae, |
+ * |                              |                    | 0x70d1-0x70da  |
  * | Task Management              |       0x803c       | 0x8025-0x8026  |
  * |                              |                    | 0x800b,0x8039  |
  * | AER/EEH                      |       0x9011       |		|
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 65c5ff7..c32efc7 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -245,7 +245,6 @@
 
 #define MAX_CMDSZ	16		/* SCSI maximum CDB size. */
 #include "qla_fw.h"
-
 /*
  * Timeout timer counts in seconds
  */
@@ -265,6 +264,7 @@
 #define RESPONSE_ENTRY_CNT_2300		512	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_MQ		128	/* Number of response entries.*/
 #define ATIO_ENTRY_CNT_24XX		4096	/* Number of ATIO entries. */
+#define RESPONSE_ENTRY_CNT_FX00		256     /* Number of response entries.*/
 
 struct req_que;
 
@@ -284,6 +284,7 @@
 struct srb_cmd {
 	struct scsi_cmnd *cmd;		/* Linux SCSI command pkt */
 	uint32_t request_sense_length;
+	uint32_t fw_sense_length;
 	uint8_t *request_sense_ptr;
 	void *ctx;
 };
@@ -321,7 +322,39 @@
 			uint32_t flags;
 			uint32_t lun;
 			uint32_t data;
+			struct completion comp;
+			uint32_t comp_status;
 		} tmf;
+		struct {
+#define SRB_FXDISC_REQ_DMA_VALID	BIT_0
+#define SRB_FXDISC_RESP_DMA_VALID	BIT_1
+#define SRB_FXDISC_REQ_DWRD_VALID	BIT_2
+#define SRB_FXDISC_RSP_DWRD_VALID	BIT_3
+#define FXDISC_TIMEOUT 20
+			uint8_t flags;
+			uint32_t req_len;
+			uint32_t rsp_len;
+			void *req_addr;
+			void *rsp_addr;
+			dma_addr_t req_dma_handle;
+			dma_addr_t rsp_dma_handle;
+			uint32_t adapter_id;
+			uint32_t adapter_id_hi;
+			uint32_t req_func_type;
+			uint32_t req_data;
+			uint32_t req_data_extra;
+			uint32_t result;
+			uint32_t seq_number;
+			uint32_t fw_flags;
+			struct completion fxiocb_comp;
+			uint32_t reserved_0;
+			uint8_t reserved_1;
+		} fxiocb;
+		struct {
+			uint32_t cmd_hndl;
+			uint32_t comp_status;
+			struct completion comp;
+		} abt;
 	} u;
 
 	struct timer_list timer;
@@ -338,6 +371,10 @@
 #define SRB_TM_CMD	7
 #define SRB_SCSI_CMD	8
 #define SRB_BIDI_CMD	9
+#define SRB_FXIOCB_DCMD	10
+#define SRB_FXIOCB_BCMD	11
+#define SRB_ABT_CMD	12
+
 
 typedef struct srb {
 	atomic_t ref_count;
@@ -368,6 +405,10 @@
 	(sp->u.scmd.request_sense_ptr)
 #define SET_CMD_SENSE_PTR(sp, ptr) \
 	(sp->u.scmd.request_sense_ptr = ptr)
+#define GET_FW_SENSE_LEN(sp) \
+	(sp->u.scmd.fw_sense_length)
+#define SET_FW_SENSE_LEN(sp, len) \
+	(sp->u.scmd.fw_sense_length = len)
 
 struct msg_echo_lb {
 	dma_addr_t send_dma;
@@ -376,6 +417,7 @@
 	uint16_t rsp_sg_cnt;
 	uint16_t options;
 	uint32_t transfer_size;
+	uint32_t iteration_count;
 };
 
 /*
@@ -542,11 +584,74 @@
 	uint32_t atio_q_out;
 };
 
+
+struct device_reg_fx00 {
+	uint32_t mailbox0;		/* 00 */
+	uint32_t mailbox1;		/* 04 */
+	uint32_t mailbox2;		/* 08 */
+	uint32_t mailbox3;		/* 0C */
+	uint32_t mailbox4;		/* 10 */
+	uint32_t mailbox5;		/* 14 */
+	uint32_t mailbox6;		/* 18 */
+	uint32_t mailbox7;		/* 1C */
+	uint32_t mailbox8;		/* 20 */
+	uint32_t mailbox9;		/* 24 */
+	uint32_t mailbox10;		/* 28 */
+	uint32_t mailbox11;
+	uint32_t mailbox12;
+	uint32_t mailbox13;
+	uint32_t mailbox14;
+	uint32_t mailbox15;
+	uint32_t mailbox16;
+	uint32_t mailbox17;
+	uint32_t mailbox18;
+	uint32_t mailbox19;
+	uint32_t mailbox20;
+	uint32_t mailbox21;
+	uint32_t mailbox22;
+	uint32_t mailbox23;
+	uint32_t mailbox24;
+	uint32_t mailbox25;
+	uint32_t mailbox26;
+	uint32_t mailbox27;
+	uint32_t mailbox28;
+	uint32_t mailbox29;
+	uint32_t mailbox30;
+	uint32_t mailbox31;
+	uint32_t aenmailbox0;
+	uint32_t aenmailbox1;
+	uint32_t aenmailbox2;
+	uint32_t aenmailbox3;
+	uint32_t aenmailbox4;
+	uint32_t aenmailbox5;
+	uint32_t aenmailbox6;
+	uint32_t aenmailbox7;
+	/* Request Queue. */
+	uint32_t req_q_in;		/* A0 - Request Queue In-Pointer */
+	uint32_t req_q_out;		/* A4 - Request Queue Out-Pointer */
+	/* Response Queue. */
+	uint32_t rsp_q_in;		/* A8 - Response Queue In-Pointer */
+	uint32_t rsp_q_out;		/* AC - Response Queue Out-Pointer */
+	/* Init values shadowed on FW Up Event */
+	uint32_t initval0;		/* B0 */
+	uint32_t initval1;		/* B4 */
+	uint32_t initval2;		/* B8 */
+	uint32_t initval3;		/* BC */
+	uint32_t initval4;		/* C0 */
+	uint32_t initval5;		/* C4 */
+	uint32_t initval6;		/* C8 */
+	uint32_t initval7;		/* CC */
+	uint32_t fwheartbeat;		/* D0 */
+};
+
+
+
 typedef union {
 		struct device_reg_2xxx isp;
 		struct device_reg_24xx isp24;
 		struct device_reg_25xxmq isp25mq;
 		struct device_reg_82xx isp82;
+		struct device_reg_fx00 ispfx00;
 } device_reg_t;
 
 #define ISP_REQ_Q_IN(ha, reg) \
@@ -602,6 +707,20 @@
 #define IOCTL_CMD	BIT_2
 } mbx_cmd_t;
 
+struct mbx_cmd_32 {
+	uint32_t	out_mb;		/* outbound from driver */
+	uint32_t	in_mb;			/* Incoming from RISC */
+	uint32_t	mb[MAILBOX_REGISTER_COUNT];
+	long		buf_size;
+	void		*bufp;
+	uint32_t	tov;
+	uint8_t		flags;
+#define MBX_DMA_IN	BIT_0
+#define	MBX_DMA_OUT	BIT_1
+#define IOCTL_CMD	BIT_2
+};
+
+
 #define	MBX_TOV_SECONDS	30
 
 /*
@@ -677,6 +796,15 @@
 #define MBA_BYPASS_NOTIFICATION	0x8043	/* Auto bypass notification. */
 #define MBA_DISCARD_RND_FRAME	0x8048	/* discard RND frame due to error. */
 #define MBA_REJECTED_FCP_CMD	0x8049	/* rejected FCP_CMD. */
+#define MBA_FW_NOT_STARTED	0x8050	/* Firmware not started */
+#define MBA_FW_STARTING		0x8051	/* Firmware starting */
+#define MBA_FW_RESTART_CMPLT	0x8060	/* Firmware restart complete */
+#define MBA_INIT_REQUIRED	0x8061	/* Initialization required */
+#define MBA_SHUTDOWN_REQUESTED	0x8062	/* Shutdown Requested */
+#define MBA_FW_INIT_FAILURE	0x8401	/* Firmware initialization failure */
+#define MBA_MIRROR_LUN_CHANGE	0x8402	/* Mirror LUN State Change
+					   Notification */
+#define MBA_FW_POLL_STATE	0x8600  /* Firmware in poll diagnostic state */
 
 /* 83XX FCoE specific */
 #define MBA_IDC_AEN		0x8200  /* FCoE: NIC Core state change AEN */
@@ -798,6 +926,12 @@
 #define MBC_LUN_RESET			0x7E	/* Send LUN reset */
 
 /*
+ * all the Mt. Rainier mailbox command codes that clash with FC/FCoE ones
+ * should be defined with MBC_MR_*
+ */
+#define MBC_MR_DRV_SHUTDOWN		0x6A
+
+/*
  * ISP24xx mailbox commands
  */
 #define MBC_SERDES_PARAMS		0x10	/* Serdes Tx Parameters. */
@@ -1058,6 +1192,30 @@
 	uint8_t  reserved_3[26];
 } init_cb_t;
 
+
+struct init_cb_fx {
+	uint16_t	version;
+	uint16_t	reserved_1[13];
+	uint16_t	request_q_outpointer;
+	uint16_t	response_q_inpointer;
+	uint16_t	reserved_2[2];
+	uint16_t	response_q_length;
+	uint16_t	request_q_length;
+	uint16_t	reserved_3[2];
+	uint32_t	request_q_address[2];
+	uint32_t	response_q_address[2];
+	uint16_t	reserved_4[4];
+	uint8_t		response_q_msivec;
+	uint8_t		reserved_5[19];
+	uint16_t	interrupt_delay_timer;
+	uint16_t	reserved_6;
+	uint32_t	fwoptions1;
+	uint32_t	fwoptions2;
+	uint32_t	fwoptions3;
+	uint8_t		reserved_7[24];
+};
+
+
 /*
  * Get Link Status mailbox command return buffer.
  */
@@ -1831,6 +1989,9 @@
 	uint16_t loop_id;
 	uint16_t old_loop_id;
 
+	uint16_t tgt_id;
+	uint16_t old_tgt_id;
+
 	uint8_t fcp_prio;
 
 	uint8_t fabric_port_name[WWN_SIZE];
@@ -1848,8 +2009,15 @@
 
 	uint8_t fc4_type;
 	uint8_t scan_state;
+
+	unsigned long last_queue_full;
+	unsigned long last_ramp_up;
+
+	uint16_t port_id;
 } fc_port_t;
 
+#include "qla_mr.h"
+
 /*
  * Fibre channel port/lun states.
  */
@@ -2391,6 +2559,7 @@
 	int (*start_scsi) (srb_t *);
 	int (*abort_isp) (struct scsi_qla_host *);
 	int (*iospace_config)(struct qla_hw_data*);
+	int (*initialize_adapter)(struct scsi_qla_host *);
 };
 
 /* MSI-X Support *************************************************************/
@@ -2429,6 +2598,7 @@
 	QLA_EVT_ASYNC_ADISC,
 	QLA_EVT_ASYNC_ADISC_DONE,
 	QLA_EVT_UEVENT,
+	QLA_EVT_AENFX,
 };
 
 
@@ -2456,7 +2626,15 @@
 			u32 code;
 #define QLA_UEVENT_CODE_FW_DUMP	0
 		} uevent;
-	} u;
+		struct {
+			uint32_t        evtcode;
+			uint32_t        mbx[8];
+			uint32_t        count;
+		} aenfx;
+		struct {
+			srb_t *sp;
+		} iosb;
+	 } u;
 };
 
 struct qla_chip_state_84xx {
@@ -2520,6 +2698,11 @@
 	struct req_que *req;
 	srb_t *status_srb; /* status continuation entry */
 	struct work_struct q_work;
+
+	dma_addr_t  dma_fx00;
+	response_t *ring_fx00;
+	uint16_t  length_fx00;
+	uint8_t rsp_pkt[REQUEST_ENTRY_SIZE];
 };
 
 /* Request queue data structure */
@@ -2544,6 +2727,11 @@
 	uint16_t num_outstanding_cmds;
 #define	MAX_Q_DEPTH		32
 	int max_q_depth;
+
+	dma_addr_t  dma_fx00;
+	request_t *ring_fx00;
+	uint16_t  length_fx00;
+	uint8_t req_pkt[REQUEST_ENTRY_SIZE];
 };
 
 /* Place holder for FW buffer parameters */
@@ -2633,7 +2821,10 @@
 		uint32_t	isp82xx_no_md_cap:1;
 		uint32_t	host_shutting_down:1;
 		uint32_t	idc_compl_status:1;
-		/* 32 bits */
+
+		uint32_t        mr_reset_hdlr_active:1;
+		uint32_t        mr_intr_valid:1;
+		/* 34 bits */
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -2650,7 +2841,21 @@
 	resource_size_t pio_address;
 
 #define MIN_IOBASE_LEN          0x100
-/* Multi queue data structs */
+	dma_addr_t		bar0_hdl;
+
+	void __iomem *cregbase;
+	dma_addr_t		bar2_hdl;
+#define BAR0_LEN_FX00			(1024 * 1024)
+#define BAR2_LEN_FX00			(128 * 1024)
+
+	uint32_t		rqstq_intr_code;
+	uint32_t		mbx_intr_code;
+	uint32_t		req_que_len;
+	uint32_t		rsp_que_len;
+	uint32_t		req_que_off;
+	uint32_t		rsp_que_off;
+
+	/* Multi queue data structs */
 	device_reg_t __iomem *mqiobase;
 	device_reg_t __iomem *msixbase;
 	uint16_t        msix_count;
@@ -2729,7 +2934,8 @@
 #define DT_ISP8021			BIT_14
 #define DT_ISP2031			BIT_15
 #define DT_ISP8031			BIT_16
-#define DT_ISP_LAST			(DT_ISP8031 << 1)
+#define DT_ISPFX00			BIT_17
+#define DT_ISP_LAST			(DT_ISPFX00 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -2757,6 +2963,7 @@
 #define IS_QLA82XX(ha)	(DT_MASK(ha) & DT_ISP8021)
 #define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)
 #define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)
+#define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2821,6 +3028,7 @@
 	uint16_t	r_a_tov;
 	int		port_down_retry_count;
 	uint8_t		mbx_count;
+	uint8_t		aen_mbx_count;
 
 	uint32_t	login_retry_count;
 	/* SNS command interfaces. */
@@ -2868,9 +3076,13 @@
 	void		*swl;
 
 	/* These are used by mailbox operations. */
-	volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+	uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
+	uint32_t mailbox_out32[MAILBOX_REGISTER_COUNT];
+	uint32_t aenmb[AEN_MAILBOX_REGISTER_COUNT_FX00];
 
 	mbx_cmd_t	*mcp;
+	struct mbx_cmd_32	*mcp32;
+
 	unsigned long	mbx_cmd_flags;
 #define MBX_INTERRUPT		1
 #define MBX_INTR_WAIT		2
@@ -3014,6 +3226,7 @@
 	int             cur_vport_count;
 
 	struct qla_chip_state_84xx *cs84xx;
+	struct qla_statistics qla_stats;
 	struct isp_operations *isp_ops;
 	struct workqueue_struct *wq;
 	struct qlfc_fw fw_buf;
@@ -3080,6 +3293,8 @@
 	unsigned long   host_last_rampup_time;
 	int             cfg_lun_q_depth;
 
+	struct mr_data_fx00 mr;
+
 	struct qlt_hw_data tgt;
 	uint16_t	thermal_support;
 #define THERMAL_SUPPORT_I2C BIT_0
@@ -3109,6 +3324,8 @@
 		uint32_t	process_response_queue	:1;
 		uint32_t	difdix_supported:1;
 		uint32_t	delete_progress:1;
+
+		uint32_t	fw_tgt_reported:1;
 	} flags;
 
 	atomic_t	loop_state;
@@ -3144,6 +3361,9 @@
 #define SCR_PENDING		21	/* SCR in target mode */
 #define HOST_RAMP_DOWN_QUEUE_DEPTH     22
 #define HOST_RAMP_UP_QUEUE_DEPTH       23
+#define PORT_UPDATE_NEEDED	24
+#define FX00_RESET_RECOVERY	25
+#define FX00_TARGET_SCAN	26
 
 	uint32_t	device_flags;
 #define SWITCH_FOUND		BIT_0
@@ -3234,6 +3454,10 @@
 	 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) || \
 	 atomic_read(&ha->loop_state) == LOOP_DOWN)
 
+#define STATE_TRANSITION(ha) \
+		(test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \
+			 test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
+
 #define QLA_VHA_MARK_BUSY(__vha, __bail) do {		     \
 	atomic_inc(&__vha->vref_count);			     \
 	mb();						     \
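
As a sanity check on the new struct device_reg_fx00 in qla_def.h: 32 mailbox registers plus 8 AEN mailbox registers, each 4 bytes, land the queue pointers exactly at the offsets the comments claim (req_q_in at 0xA0 through rsp_q_out at 0xAC), with the shadowed init values at 0xB0 and the firmware heartbeat at 0xD0. Pinned down at build time it would look roughly like this (hypothetical helper, assuming BUILD_BUG_ON and offsetof from the usual kernel headers):

static inline void qlafx00_reg_layout_check_sketch(void)
{
	/* 32 * 4 bytes of mailboxes + 8 * 4 bytes of AEN mailboxes = 0xA0 */
	BUILD_BUG_ON(offsetof(struct device_reg_fx00, req_q_in)    != 0xA0);
	BUILD_BUG_ON(offsetof(struct device_reg_fx00, rsp_q_out)   != 0xAC);
	BUILD_BUG_ON(offsetof(struct device_reg_fx00, initval0)    != 0xB0);
	BUILD_BUG_ON(offsetof(struct device_reg_fx00, fwheartbeat) != 0xD0);
}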
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b310fa9..026bfde 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -86,6 +86,7 @@
 
 extern int
 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+extern int qla2x00_init_rings(scsi_qla_host_t *);
 
 /*
  * Global Data in qla_os.c source file.
@@ -134,7 +135,6 @@
     uint16_t *);
 extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
     fc_port_t *, uint16_t *);
-extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
 
@@ -158,6 +158,7 @@
 extern int __qla83xx_set_drv_presence(scsi_qla_host_t *vha);
 extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
 extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
+extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 /*
  * Global Functions in qla_mid.c source file.
@@ -211,8 +212,6 @@
 int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
 						uint16_t, uint16_t, uint8_t);
 extern int qla2x00_start_sp(srb_t *);
-extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
-extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
@@ -424,6 +423,12 @@
 
 extern int qla2x00_get_data_rate(scsi_qla_host_t *);
 extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
+extern srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *, const char *, struct req_que *,
+	void *);
+extern void
+qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
+	uint32_t);
 
 /*
  * Global Function Prototypes in qla_sup.c source file.
@@ -561,6 +566,42 @@
 extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 
+/* qlafx00 related functions */
+extern int qlafx00_pci_config(struct scsi_qla_host *);
+extern int qlafx00_initialize_adapter(struct scsi_qla_host *);
+extern void qlafx00_soft_reset(scsi_qla_host_t *);
+extern int qlafx00_chip_diag(scsi_qla_host_t *);
+extern void qlafx00_config_rings(struct scsi_qla_host *);
+extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
+extern irqreturn_t qlafx00_intr_handler(int, void *);
+extern void qlafx00_enable_intrs(struct qla_hw_data *);
+extern void qlafx00_disable_intrs(struct qla_hw_data *);
+extern int qlafx00_abort_command(srb_t *);
+extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
+extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
+extern int qlafx00_start_scsi(srb_t *);
+extern int qlafx00_abort_isp(scsi_qla_host_t *);
+extern int qlafx00_iospace_config(struct qla_hw_data *);
+extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_fw_ready(scsi_qla_host_t *);
+extern int qlafx00_configure_devices(scsi_qla_host_t *);
+extern int qlafx00_reset_initialize(scsi_qla_host_t *);
+extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint8_t);
+extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *);
+extern int qlafx00_post_aenfx_work(struct scsi_qla_host *,  uint32_t,
+				   uint32_t *, int);
+extern uint32_t qlafx00_fw_state_show(struct device *,
+				      struct device_attribute *, char *);
+extern void qlafx00_get_host_speed(struct Scsi_Host *);
+extern void qlafx00_init_response_q_entries(struct rsp_que *);
+
+extern void qlafx00_tm_iocb(srb_t *, struct tsk_mgmt_entry_fx00 *);
+extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
+extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
+extern void qlafx00_timer_routine(scsi_qla_host_t *);
+extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+
 /* qla82xx related functions */
 
 /* PCI related functions */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 9b45525..d0ea8b9 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -639,9 +639,14 @@
 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
 {
 	struct qla_hw_data *ha = vha->hw;
-	sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s",ha->model_number,
-	    ha->fw_major_version, ha->fw_minor_version,
-	    ha->fw_subminor_version, qla2x00_version_str);
+
+	if (IS_QLAFX00(ha))
+		sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
+		    ha->mr.fw_version, qla2x00_version_str);
+	else
+		sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+		    ha->fw_major_version, ha->fw_minor_version,
+		    ha->fw_subminor_version, qla2x00_version_str);
 }
 
 /**
@@ -923,7 +928,7 @@
 		    sns_cmd->p.gpn_data[9] != 0x02) {
 			ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
 			    "GPN_ID failed, rejected request, gpn_rsp:\n");
-			ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
+			ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
 			    sns_cmd->p.gpn_data, 16);
 			rval = QLA_FUNCTION_FAILED;
 		} else {
@@ -1718,7 +1723,8 @@
 	int rval;
        struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+	if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
+	    IS_QLAFX00(ha))
 		return QLA_FUNCTION_FAILED;
 
 	rval = qla2x00_mgmt_svr_login(vha);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b592033..3565dfd 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -25,7 +25,6 @@
 */
 static int qla2x00_isp_firmware(scsi_qla_host_t *);
 static int qla2x00_setup_chip(scsi_qla_host_t *);
-static int qla2x00_init_rings(scsi_qla_host_t *);
 static int qla2x00_fw_ready(scsi_qla_host_t *);
 static int qla2x00_configure_hba(scsi_qla_host_t *);
 static int qla2x00_configure_loop(scsi_qla_host_t *);
@@ -83,7 +82,9 @@
 
 	/* Firmware should use switch negotiated r_a_tov for timeout. */
 	tmo = ha->r_a_tov / 10 * 2;
-	if (!IS_FWI2_CAPABLE(ha)) {
+	if (IS_QLAFX00(ha)) {
+		tmo = FX00_DEF_RATOV * 2;
+	} else if (!IS_FWI2_CAPABLE(ha)) {
 		/*
 		 * Except for earlier ISPs where the timeout is seeded from the
 		 * initialization control block.
@@ -1977,7 +1978,7 @@
  *
  * Returns 0 on success.
  */
-static int
+int
 qla2x00_init_rings(scsi_qla_host_t *vha)
 {
 	int	rval;
@@ -2012,7 +2013,10 @@
 		if (!rsp)
 			continue;
 		/* Initialize response queue entries */
-		qla2x00_init_response_q_entries(rsp);
+		if (IS_QLAFX00(ha))
+			qlafx00_init_response_q_entries(rsp);
+		else
+			qla2x00_init_response_q_entries(rsp);
 	}
 
 	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
@@ -2024,11 +2028,16 @@
 
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
+	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
+
+	if (IS_QLAFX00(ha)) {
+		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
+		goto next_check;
+	}
+
 	/* Update any ISP specific firmware options before initialization. */
 	ha->isp_ops->update_fw_options(vha);
 
-	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");
-
 	if (ha->flags.npiv_supported) {
 		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
 			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
@@ -2042,6 +2051,7 @@
 	}
 
 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
+next_check:
 	if (rval) {
 		ql_log(ql_log_fatal, vha, 0x00d2,
 		    "Init Firmware **** FAILED ****.\n");
@@ -2069,6 +2079,9 @@
 	uint16_t	state[5];
 	struct qla_hw_data *ha = vha->hw;
 
+	if (IS_QLAFX00(vha->hw))
+		return qlafx00_fw_ready(vha);
+
 	rval = QLA_SUCCESS;
 
 	/* 20 seconds for loop down. */
@@ -3134,6 +3147,12 @@
 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	fcport->vha = vha;
+
+	if (IS_QLAFX00(vha->hw)) {
+		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+		qla2x00_reg_remote_port(vha, fcport);
+		return;
+	}
 	fcport->login_retry = 0;
 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 
@@ -3894,15 +3913,24 @@
 			/* Wait at most MAX_TARGET RSCNs for a stable link. */
 			wait_time = 256;
 			do {
-				/* Issue a marker after FW becomes ready. */
-				qla2x00_marker(vha, req, rsp, 0, 0,
-					MK_SYNC_ALL);
-				vha->marker_needed = 0;
+				if (!IS_QLAFX00(vha->hw)) {
+					/*
+					 * Issue a marker after FW becomes
+					 * ready.
+					 */
+					qla2x00_marker(vha, req, rsp, 0, 0,
+						MK_SYNC_ALL);
+					vha->marker_needed = 0;
+				}
 
 				/* Remap devices on Loop. */
 				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 
-				qla2x00_configure_loop(vha);
+				if (IS_QLAFX00(vha->hw))
+					qlafx00_configure_devices(vha);
+				else
+					qla2x00_configure_loop(vha);
+
 				wait_time--;
 			} while (!atomic_read(&vha->loop_down_timer) &&
 				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
@@ -3968,9 +3996,7 @@
 			if (fcport->drport &&
 			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
 				spin_unlock_irqrestore(&ha->vport_slock, flags);
-
 				qla2x00_rport_del(fcport);
-
 				spin_lock_irqsave(&ha->vport_slock, flags);
 			}
 		}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 68e2c4a..98ab921 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -5,6 +5,28 @@
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
+/**
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed
+ *
+ * Returns the number of IOCB entries needed to store @dsds.
+ */
+static inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+{
+	uint16_t iocbs;
+
+	iocbs = 1;
+	if (dsds > 1) {
+		iocbs += (dsds - 1) / 5;
+		if ((dsds - 1) % 5)
+			iocbs++;
+	}
+	return iocbs;
+}
+
 /*
  * qla2x00_debounce_register
  *      Debounce register.
@@ -58,6 +80,17 @@
 }
 
 static inline void
+host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
+{
+	uint32_t *isrc = (uint32_t *) src;
+	uint32_t *odest = (uint32_t *) dst;
+	uint32_t iter = bsize >> 2;
+
+	for (; iter ; iter--)
+		*odest++ = cpu_to_le32(*isrc++);
+}
+
+static inline void
 qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
 {
 	int i;
@@ -213,12 +246,18 @@
 	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
 	add_timer(&sp->u.iocb_cmd.timer);
 	sp->free = qla2x00_sp_free;
+	if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
+	    (sp->type == SRB_FXIOCB_DCMD))
+		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
 }
 
 static inline int
 qla2x00_gid_list_size(struct qla_hw_data *ha)
 {
-	return sizeof(struct gid_list_info) * ha->max_fibre_devices;
+	if (IS_QLAFX00(ha))
+		return sizeof(uint32_t) * 32;
+	else
+		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
 }
 
 static inline void
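
For reference, the arithmetic in qla24xx_calc_iocbs() (moved into qla_inline.h above) treats the command IOCB as carrying the first data segment descriptor and each Continuation Type 1 IOCB as carrying five more, so for dsds > 1 the result is 1 + ceil((dsds - 1) / 5). A few worked values:

	dsds =  1  ->  1 IOCB
	dsds =  2  ->  2 IOCBs  (1 + ceil(1/5))
	dsds =  6  ->  2 IOCBs  (1 + ceil(5/5))
	dsds =  7  ->  3 IOCBs  (1 + ceil(6/5))
	dsds = 16  ->  4 IOCBs  (1 + ceil(15/5))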
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index d263031..15e4080 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -135,7 +135,8 @@
 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
-	*((uint32_t *)(&cont_pkt->entry_type)) =
+	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
+	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
 	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
 
 	return (cont_pkt);
@@ -486,6 +487,10 @@
 		if (ha->mqenable || IS_QLA83XX(ha)) {
 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
 			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
+		} else if (IS_QLAFX00(ha)) {
+			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
+			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
+			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
 		} else if (IS_FWI2_CAPABLE(ha)) {
 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
@@ -514,11 +519,12 @@
 			uint16_t lun, uint8_t type)
 {
 	mrk_entry_t *mrk;
-	struct mrk_entry_24xx *mrk24;
+	struct mrk_entry_24xx *mrk24 = NULL;
+	struct mrk_entry_fx00 *mrkfx = NULL;
+
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	mrk24 = NULL;
 	req = ha->req_q_map[0];
 	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
 	if (mrk == NULL) {
@@ -531,7 +537,15 @@
 	mrk->entry_type = MARKER_TYPE;
 	mrk->modifier = type;
 	if (type != MK_SYNC_ALL) {
-		if (IS_FWI2_CAPABLE(ha)) {
+		if (IS_QLAFX00(ha)) {
+			mrkfx = (struct mrk_entry_fx00 *) mrk;
+			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle);
+			mrkfx->handle_hi = 0;
+			mrkfx->tgt_id = cpu_to_le16(loop_id);
+			mrkfx->lun[1] = LSB(lun);
+			mrkfx->lun[2] = MSB(lun);
+			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun));
+		} else if (IS_FWI2_CAPABLE(ha)) {
 			mrk24 = (struct mrk_entry_24xx *) mrk;
 			mrk24->nport_handle = cpu_to_le16(loop_id);
 			mrk24->lun[1] = LSB(lun);
@@ -589,28 +603,6 @@
 	return QLA_SUCCESS;
 }
 
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of IOCB entries needed to store @dsds.
- */
-inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
-{
-	uint16_t iocbs;
-
-	iocbs = 1;
-	if (dsds > 1) {
-		iocbs += (dsds - 1) / 5;
-		if ((dsds - 1) % 5)
-			iocbs++;
-	}
-	return iocbs;
-}
-
 static inline int
 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 	uint16_t tot_dsds)
@@ -1583,7 +1575,6 @@
 	return QLA_FUNCTION_FAILED;
 }
 
-
 /**
  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
  * @sp: command to send to the ISP
@@ -1852,6 +1843,8 @@
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
 		else if (IS_FWI2_CAPABLE(ha))
 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+		else if (IS_QLAFX00(ha))
+			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
 		else
 			cnt = qla2x00_debounce_register(
 			    ISP_REQ_Q_OUT(ha, &reg->isp));
@@ -1869,8 +1862,13 @@
 	req->cnt -= req_cnt;
 	pkt = req->ring_ptr;
 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
-	pkt->entry_count = req_cnt;
-	pkt->handle = handle;
+	if (IS_QLAFX00(ha)) {
+		WRT_REG_BYTE(&pkt->entry_count, req_cnt);
+		WRT_REG_WORD(&pkt->handle, handle);
+	} else {
+		pkt->entry_count = req_cnt;
+		pkt->handle = handle;
+	}
 
 queuing_error:
 	return pkt;
@@ -2625,7 +2623,16 @@
 		    qla2x00_adisc_iocb(sp, pkt);
 		break;
 	case SRB_TM_CMD:
-		qla24xx_tm_iocb(sp, pkt);
+		IS_QLAFX00(ha) ?
+		    qlafx00_tm_iocb(sp, pkt) :
+		    qla24xx_tm_iocb(sp, pkt);
+		break;
+	case SRB_FXIOCB_DCMD:
+	case SRB_FXIOCB_BCMD:
+		qlafx00_fxdisc_iocb(sp, pkt);
+		break;
+	case SRB_ABT_CMD:
+		qlafx00_abort_iocb(sp, pkt);
 		break;
 	default:
 		break;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e9dbd74..259d920 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -13,11 +13,7 @@
 #include <scsi/scsi_bsg_fc.h>
 #include <scsi/scsi_eh.h>
 
-#include "qla_target.h"
-
 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
-static void qla2x00_process_completed_request(struct scsi_qla_host *,
-	struct req_que *, uint32_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
@@ -1065,9 +1061,9 @@
  * @ha: SCSI driver HA context
  * @index: SRB index
  */
-static void
+void
 qla2x00_process_completed_request(struct scsi_qla_host *vha,
-				struct req_que *req, uint32_t index)
+				  struct req_que *req, uint32_t index)
 {
 	srb_t *sp;
 	struct qla_hw_data *ha = vha->hw;
@@ -1101,7 +1097,7 @@
 	}
 }
 
-static srb_t *
+srb_t *
 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
     struct req_que *req, void *iocb)
 {
@@ -1994,7 +1990,7 @@
 		return;
 	}
 
-  	lscsi_status = scsi_status & STATUS_MASK;
+	lscsi_status = scsi_status & STATUS_MASK;
 
 	fcport = sp->fcport;
 
@@ -2939,7 +2935,7 @@
 
 	/* If possible, enable MSI-X. */
 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-		!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
+		!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
 		goto skip_msi;
 
 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2972,7 +2968,7 @@
 skip_msix:
 
 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
-	    !IS_QLA8001(ha) && !IS_QLA82XX(ha))
+	    !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
 		goto skip_msi;
 
 	ret = pci_enable_msi(ha->pdev);
@@ -2998,9 +2994,11 @@
 		    "Failed to reserve interrupt %d already in use.\n",
 		    ha->pdev->irq);
 		goto fail;
-	} else if (!ha->flags.msi_enabled)
+	} else if (!ha->flags.msi_enabled) {
 		ql_dbg(ql_dbg_init, vha, 0x0125,
 		    "INTa mode: Enabled.\n");
+		ha->flags.mr_intr_valid = 1;
+	}
 
 clear_risc_ints:
 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 43345af..9e5d89d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4113,7 +4113,6 @@
 	int rval;
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
-	uint32_t iter_cnt = 0x1;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
 	    "Entered %s.\n", __func__);
@@ -4139,8 +4138,8 @@
 	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
 
 	/* Iteration count */
-	mcp->mb[18] = LSW(iter_cnt);
-	mcp->mb[19] = MSW(iter_cnt);
+	mcp->mb[18] = LSW(mreq->iteration_count);
+	mcp->mb[19] = MSW(mreq->iteration_count);
 
 	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
 	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
@@ -4518,7 +4517,8 @@
 			goto done;
 
 		ql_log(ql_log_warn, vha, 0x10c9,
-		    "Thermal not supported by I2C.\n");
+		    "Thermal not supported through I2C bus, trying alternate "
+		    "method (ISP access).\n");
 		ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
 	}
 
@@ -4528,7 +4528,7 @@
 			goto done;
 
 		ql_log(ql_log_warn, vha, 0x1019,
-		    "Thermal not supported by ISP.\n");
+		    "Thermal not supported through ISP.\n");
 		ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
new file mode 100644
index 0000000..729b743
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -0,0 +1,3476 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_def.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/utsname.h>
+
+
+/* QLAFX00 specific Mailbox implementation functions */
+
+/*
+ *	Issues a mailbox command and waits for completion.
+ *	Issue mailbox command and waits for completion.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	mcp = driver internal mbx struct pointer.
+ *
+ * Output:
+ *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
+ *
+ * Returns:
+ *	0 : QLA_SUCCESS = cmd performed successfully
+ *	1 : QLA_FUNCTION_FAILED   (error encountered)
+ *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int
+qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
+
+{
+	int		rval;
+	unsigned long    flags = 0;
+	device_reg_t __iomem *reg;
+	uint8_t		abort_active;
+	uint8_t		io_lock_on;
+	uint16_t	command = 0;
+	uint32_t	*iptr;
+	uint32_t __iomem *optr;
+	uint32_t	cnt;
+	uint32_t	mboxes;
+	unsigned long	wait_time;
+	struct qla_hw_data *ha = vha->hw;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+
+	if (ha->pdev->error_state > pci_channel_io_frozen) {
+		ql_log(ql_log_warn, vha, 0x115c,
+		    "error_state is greater than pci_channel_io_frozen, "
+		    "exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_log(ql_log_warn, vha, 0x115f,
+		    "Device in failed state, exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	reg = ha->iobase;
+	io_lock_on = base_vha->flags.init_done;
+
+	rval = QLA_SUCCESS;
+	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+
+	if (ha->flags.pci_channel_io_perm_failure) {
+		ql_log(ql_log_warn, vha, 0x1175,
+		    "Perm failure on EEH timeout MBX, exiting.\n");
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	if (ha->flags.isp82xx_fw_hung) {
+		/* Setting Link-Down error */
+		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+		ql_log(ql_log_warn, vha, 0x1176,
+		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
+		rval = QLA_FUNCTION_FAILED;
+		goto premature_exit;
+	}
+
+	/*
+	 * Wait for active mailbox commands to finish by waiting at most tov
+	 * seconds. This is to serialize actual issuing of mailbox cmds during
+	 * non ISP abort time.
+	 */
+	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
+		/* Timeout occurred. Return error. */
+		ql_log(ql_log_warn, vha, 0x1177,
+		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
+		    mcp->mb[0]);
+		return QLA_FUNCTION_TIMEOUT;
+	}
+
+	ha->flags.mbox_busy = 1;
+	/* Save mailbox command for debug */
+	ha->mcp32 = mcp;
+
+	ql_dbg(ql_dbg_mbx, vha, 0x1178,
+	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Load mailbox registers. */
+	optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
+
+	iptr = mcp->mb;
+	command = mcp->mb[0];
+	mboxes = mcp->out_mb;
+
+	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+		if (mboxes & BIT_0)
+			WRT_REG_DWORD(optr, *iptr);
+
+		mboxes >>= 1;
+		optr++;
+		iptr++;
+	}
+
+	/* Issue set host interrupt command to send cmd out. */
+	ha->flags.mbox_int = 0;
+	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
+	    (uint8_t *)mcp->mb, 16);
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
+	    ((uint8_t *)mcp->mb + 0x10), 16);
+	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
+	    ((uint8_t *)mcp->mb + 0x20), 8);
+
+	/* Unlock mbx registers and wait for interrupt */
+	ql_dbg(ql_dbg_mbx, vha, 0x1179,
+	    "Going to unlock irq & wait for interrupts. "
+	    "jiffies=%lx.\n", jiffies);
+
+	/* Wait for mbx cmd completion until timeout */
+	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
+		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
+
+		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+
+	} else {
+		ql_dbg(ql_dbg_mbx, vha, 0x112c,
+		    "Cmd=%x Polling Mode.\n", command);
+
+		QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
+		while (!ha->flags.mbox_int) {
+			if (time_after(jiffies, wait_time))
+				break;
+
+			/* Check for pending interrupts. */
+			qla2x00_poll(ha->rsp_q_map[0]);
+
+			if (!ha->flags.mbox_int &&
+			    !(IS_QLA2200(ha) &&
+			    command == MBC_LOAD_RISC_RAM_EXTENDED))
+				usleep_range(10000, 11000);
+		} /* while */
+		ql_dbg(ql_dbg_mbx, vha, 0x112d,
+		    "Waited %d sec.\n",
+		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
+	}
+
+	/* Check whether we timed out */
+	if (ha->flags.mbox_int) {
+		uint32_t *iptr2;
+
+		ql_dbg(ql_dbg_mbx, vha, 0x112e,
+		    "Cmd=%x completed.\n", command);
+
+		/* Got interrupt. Clear the flag. */
+		ha->flags.mbox_int = 0;
+		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+		if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
+			rval = QLA_FUNCTION_FAILED;
+
+		/* Load return mailbox registers. */
+		iptr2 = mcp->mb;
+		iptr = (uint32_t *)&ha->mailbox_out32[0];
+		mboxes = mcp->in_mb;
+		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
+			if (mboxes & BIT_0)
+				*iptr2 = *iptr;
+
+			mboxes >>= 1;
+			iptr2++;
+			iptr++;
+		}
+	} else {
+
+		rval = QLA_FUNCTION_TIMEOUT;
+	}
+
+	ha->flags.mbox_busy = 0;
+
+	/* Clean up */
+	ha->mcp32 = NULL;
+
+	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
+		ql_dbg(ql_dbg_mbx, vha, 0x113a,
+		    "checking for additional resp interrupt.\n");
+
+		/* polling mode for non isp_abort commands. */
+		qla2x00_poll(ha->rsp_q_map[0]);
+	}
+
+	if (rval == QLA_FUNCTION_TIMEOUT &&
+	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
+		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+		    ha->flags.eeh_busy) {
+			/* not in dpc. schedule it for dpc to take over. */
+			ql_dbg(ql_dbg_mbx, vha, 0x115d,
+			    "Timeout, schedule isp_abort_needed.\n");
+
+			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+				ql_log(ql_log_info, base_vha, 0x115e,
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
+				    "abort.\n", command, mcp->mb[0],
+				    ha->flags.eeh_busy);
+				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			}
+		} else if (!abort_active) {
+			/* call abort directly since we are in the DPC thread */
+			ql_dbg(ql_dbg_mbx, vha, 0x1160,
+			    "Timeout, calling abort_isp.\n");
+
+			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
+			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
+
+				ql_log(ql_log_info, base_vha, 0x1161,
+				    "Mailbox cmd timeout occurred, cmd=0x%x, "
+				    "mb[0]=0x%x. Scheduling ISP abort.\n",
+				    command, mcp->mb[0]);
+
+				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+				if (ha->isp_ops->abort_isp(vha)) {
+					/* Failed. retry later. */
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+				}
+				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+				ql_dbg(ql_dbg_mbx, vha, 0x1162,
+				    "Finished abort_isp.\n");
+			}
+		}
+	}
+
+premature_exit:
+	/* Allow next mbx cmd to come in. */
+	complete(&ha->mbx_cmd_comp);
+
+	if (rval) {
+		ql_log(ql_log_warn, base_vha, 0x1163,
+		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
+		    "mb[3]=%x, cmd=%x ****.\n",
+		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
+	} else {
+		ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qlafx00_driver_shutdown
+ *	Indicate a driver shutdown to firmware.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *
+ * Returns:
+ *	local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int
+qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_0;
+	if (tmo)
+		mcp->tov = tmo;
+	else
+		mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qlafx00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1167,
+		    "Failed=%x.\n", rval);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
+		    "Done %s.\n", __func__);
+	}
+
+	return rval;
+}
+
+/*
+ * qlafx00_get_firmware_state
+ *	Get adapter firmware state.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qlafx00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+static int
+qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
+	mcp->out_mb = MBX_0;
+	mcp->in_mb = MBX_1|MBX_0;
+	mcp->tov = MBX_TOV_SECONDS;
+	mcp->flags = 0;
+	rval = qlafx00_mailbox_command(vha, mcp);
+
+	/* Return firmware states. */
+	states[0] = mcp->mb[1];
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x116a,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+/*
+ * qlafx00_init_firmware
+ *	Initialize adapter firmware.
+ *
+ * Input:
+ *	ha = adapter block pointer.
+ *	dptr = Initialization control block pointer.
+ *	size = size of initialization control block.
+ *	TARGET_QUEUE_LOCK must be released.
+ *	ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ *	qlafx00 local function return status code.
+ *
+ * Context:
+ *	Kernel context.
+ */
+int
+qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
+
+	mcp->mb[1] = 0;
+	mcp->mb[2] = MSD(ha->init_cb_dma);
+	mcp->mb[3] = LSD(ha->init_cb_dma);
+
+	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_0;
+	mcp->buf_size = size;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qlafx00_mailbox_command(vha, mcp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x116d,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+/*
+ * qlafx00_mbx_reg_test
+ */
+static int
+qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
+{
+	int rval;
+	struct mbx_cmd_32 mc;
+	struct mbx_cmd_32 *mcp = &mc;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
+	    "Entered %s.\n", __func__);
+
+	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
+	mcp->mb[1] = 0xAAAA;
+	mcp->mb[2] = 0x5555;
+	mcp->mb[3] = 0xAA55;
+	mcp->mb[4] = 0x55AA;
+	mcp->mb[5] = 0xA5A5;
+	mcp->mb[6] = 0x5A5A;
+	mcp->mb[7] = 0x2525;
+	mcp->mb[8] = 0xBBBB;
+	mcp->mb[9] = 0x6666;
+	mcp->mb[10] = 0xBB66;
+	mcp->mb[11] = 0x66BB;
+	mcp->mb[12] = 0xB6B6;
+	mcp->mb[13] = 0x6B6B;
+	mcp->mb[14] = 0x3636;
+	mcp->mb[15] = 0xCCCC;
+
+	mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
+			MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+	mcp->buf_size = 0;
+	mcp->flags = MBX_DMA_OUT;
+	mcp->tov = MBX_TOV_SECONDS;
+	rval = qlafx00_mailbox_command(vha, mcp);
+	if (rval == QLA_SUCCESS) {
+		if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
+		    mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
+			rval = QLA_FUNCTION_FAILED;
+		if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
+		    mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
+			rval = QLA_FUNCTION_FAILED;
+		if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
+		    mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
+			rval = QLA_FUNCTION_FAILED;
+		if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
+		    mcp->mb[31] != 0xCCCC)
+			rval = QLA_FUNCTION_FAILED;
+	}
+
+	if (rval != QLA_SUCCESS) {
+		ql_dbg(ql_dbg_mbx, vha, 0x1170,
+		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+	} else {
+		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
+		    "Done %s.\n", __func__);
+	}
+	return rval;
+}
+
+/**
+ * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_pci_config(scsi_qla_host_t *vha)
+{
+	uint16_t w;
+	struct qla_hw_data *ha = vha->hw;
+
+	pci_set_master(ha->pdev);
+	pci_try_set_mwi(ha->pdev);
+
+	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
+	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
+	w &= ~PCI_COMMAND_INTX_DISABLE;
+	pci_write_config_word(ha->pdev, PCI_COMMAND, w);
+
+	/* PCIe -- adjust Maximum Read Request Size (2048). */
+	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+		pcie_set_readrq(ha->pdev, 2048);
+
+	ha->chip_revision = ha->pdev->revision;
+
+	return QLA_SUCCESS;
+}
+
+/**
+ * qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
+ * @ha: HA context
+ */
+static inline void
+qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
+{
+	unsigned long flags = 0;
+	struct qla_hw_data *ha = vha->hw;
+	int i, core;
+	uint32_t cnt;
+
+	/* Set all 4 cores in reset */
+	for (i = 0; i < 4; i++) {
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
+	}
+
+	/* Set all 4 core Clock gating control */
+	for (i = 0; i < 4; i++) {
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
+	}
+
+	/* Reset all units in Fabric */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
+
+	/* Reset all interrupt control registers */
+	for (i = 0; i < 115; i++) {
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
+	}
+
+	/* Reset Timers control registers. per core */
+	for (core = 0; core < 4; core++)
+		for (i = 0; i < 8; i++)
+			QLAFX00_SET_HBA_SOC_REG(ha,
+			    (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
+
+	/* Reset per core IRQ ack register */
+	for (core = 0; core < 4; core++)
+		QLAFX00_SET_HBA_SOC_REG(ha,
+		    (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
+
+	/* Set Fabric control and config to defaults */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Kick in Fabric units */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
+
+	/* Kick in Core0 to start boot process */
+	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
+
+	/* Wait 10 seconds for soft-reset to complete. */
+	for (cnt = 10; cnt; cnt--) {
+		msleep(1000);
+		barrier();
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/**
+ * qlafx00_soft_reset() - Soft Reset ISPFx00.
+ * @ha: HA context
+ */
+void
+qlafx00_soft_reset(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (unlikely(pci_channel_offline(ha->pdev) &&
+	    ha->flags.pci_channel_io_perm_failure))
+		return;
+
+	ha->isp_ops->disable_intrs(ha);
+	qlafx00_soc_cpu_reset(vha);
+	ha->isp_ops->enable_intrs(ha);
+}
+
+/**
+ * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_chip_diag(scsi_qla_host_t *vha)
+{
+	int rval = 0;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+
+	ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
+
+	rval = qlafx00_mbx_reg_test(vha);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x1165,
+		    "Failed mailbox register test\n");
+	} else {
+		/* Flag a successful rval */
+		rval = QLA_SUCCESS;
+	}
+	return rval;
+}
+
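+/*
+ * Program the ring parameters into the initialization control block and
+ * zero the request/response queue in/out pointers.
+ */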
+void
+qlafx00_config_rings(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	struct init_cb_fx *icb;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	/* Setup ring parameters in initialization control block. */
+	icb = (struct init_cb_fx *)ha->init_cb;
+	icb->request_q_outpointer = __constant_cpu_to_le16(0);
+	icb->response_q_inpointer = __constant_cpu_to_le16(0);
+	icb->request_q_length = cpu_to_le16(req->length);
+	icb->response_q_length = cpu_to_le16(rsp->length);
+	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
+	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
+	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
+	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+
+	WRT_REG_DWORD(&reg->req_q_in, 0);
+	WRT_REG_DWORD(&reg->req_q_out, 0);
+
+	WRT_REG_DWORD(&reg->rsp_q_in, 0);
+	WRT_REG_DWORD(&reg->rsp_q_out, 0);
+
+	/* PCI posting */
+	RD_REG_DWORD(&reg->rsp_q_out);
+}
+
+char *
+qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
+{
+	struct qla_hw_data *ha = vha->hw;
+	int pcie_reg;
+
+	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+	if (pcie_reg) {
+		strcpy(str, "PCIe iSA");
+		return str;
+	}
+	return str;
+}
+
+char *
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	sprintf(str, "%s", ha->mr.fw_version);
+	return str;
+}
+
+void
+qlafx00_enable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 1;
+	QLAFX00_ENABLE_ICNTRL_REG(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+void
+qlafx00_disable_intrs(struct qla_hw_data *ha)
+{
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	ha->interrupts_on = 0;
+	QLAFX00_DISABLE_ICNTRL_REG(ha);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
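+/*
+ * Async task-management helpers: the timeout callback flags CS_TIMEOUT and
+ * completes the waiter, while the normal sp->done callback simply signals
+ * completion so qlafx00_async_tm_cmd() can collect the status.
+ */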
+static void
+qlafx00_tmf_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+	tmf->u.tmf.comp_status = CS_TIMEOUT;
+	complete(&tmf->u.tmf.comp);
+}
+
+static void
+qlafx00_tmf_sp_done(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
+	complete(&tmf->u.tmf.comp);
+}
+
+static int
+qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
+		     uint32_t lun, uint32_t tag)
+{
+	scsi_qla_host_t *vha = fcport->vha;
+	struct srb_iocb *tm_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	tm_iocb = &sp->u.iocb_cmd;
+	sp->type = SRB_TM_CMD;
+	sp->name = "tmf";
+	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+	tm_iocb->u.tmf.flags = flags;
+	tm_iocb->u.tmf.lun = lun;
+	tm_iocb->u.tmf.data = tag;
+	sp->done = qlafx00_tmf_sp_done;
+	tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
+	init_completion(&tm_iocb->u.tmf.comp);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_async, vha, 0x507b,
+	    "Task management command issued target_id=%x\n",
+	    fcport->tgt_id);
+
+	wait_for_completion(&tm_iocb->u.tmf.comp);
+
+	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
+	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+	sp->free(vha, sp);
+done:
+	return rval;
+}
+
+int
+qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
+{
+	return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
+}
+
+int
+qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
+{
+	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
+}
+
+int
+qlafx00_iospace_config(struct qla_hw_data *ha)
+{
+	if (pci_request_selected_regions(ha->pdev, ha->bars,
+	    QLA2XXX_DRIVER_NAME)) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
+		    "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* Use MMIO operations for all accesses. */
+	if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
+		    "region #0 not an MMIO resource (%s), aborting\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
+		    "Invalid PCI mem BAR0 region size (%s), aborting\n",
+			pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->cregbase =
+	    ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
+	if (!ha->cregbase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
+		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
+		    "region #2 not an MMIO resource (%s), aborting\n",
+		    pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+	if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
+		ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
+		    "Invalid PCI mem BAR2 region size (%s), aborting\n",
+			pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	ha->iobase =
+	    ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
+	if (!ha->iobase) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
+		    "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
+		goto iospace_error_exit;
+	}
+
+	/* Determine queue resources */
+	ha->max_req_queues = ha->max_rsp_queues = 1;
+
+	ql_log_pci(ql_log_info, ha->pdev, 0x012c,
+	    "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
+	    ha->bars, ha->cregbase, ha->iobase);
+
+	return 0;
+
+iospace_error_exit:
+	return -ENOMEM;
+}
+
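+/*
+ * Preserve the driver-allocated request/response ring pointers before
+ * qlafx00_config_queues() re-points them at the firmware-provided queues
+ * that live in BAR2 MMIO space.
+ */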
+static void
+qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+
+	req->length_fx00 = req->length;
+	req->ring_fx00 = req->ring;
+	req->dma_fx00 = req->dma;
+
+	rsp->length_fx00 = rsp->length;
+	rsp->ring_fx00 = rsp->ring;
+	rsp->dma_fx00 = rsp->dma;
+
+	ql_dbg(ql_dbg_init, vha, 0x012d,
+	    "req: %p, ring_fx00: %p, length_fx00: 0x%x, "
+	    "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
+	    req->length_fx00, (u64)req->dma_fx00);
+
+	ql_dbg(ql_dbg_init, vha, 0x012e,
+	    "rsp: %p, ring_fx00: %p, length_fx00: 0x%x, "
+	    "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
+	    rsp->length_fx00, (u64)rsp->dma_fx00);
+}
+
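+/*
+ * On ISPFx00 the request/response rings are not allocated by the driver;
+ * they live in BAR2 at the offsets reported by the firmware, so the ring
+ * pointers and DMA addresses are derived from the BAR base plus those
+ * offsets.
+ */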
+static int
+qlafx00_config_queues(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	struct rsp_que *rsp = ha->rsp_q_map[0];
+	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
+
+	req->length = ha->req_que_len;
+	req->ring = (void *)ha->iobase + ha->req_que_off;
+	req->dma = bar2_hdl + ha->req_que_off;
+	if ((!req->ring) || (req->length == 0)) {
+		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
+		    "Invalid request queue configuration\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x0130,
+	    "req: %p req_ring pointer %p req len 0x%x "
+	    "req off 0x%x, req->dma: 0x%llx\n",
+	    req, req->ring, req->length,
+	    ha->req_que_off, (u64)req->dma);
+
+	rsp->length = ha->rsp_que_len;
+	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
+	rsp->dma = bar2_hdl + ha->rsp_que_off;
+	if ((!rsp->ring) || (rsp->length == 0)) {
+		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
+		    "Invalid response queue configuration\n");
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_init, vha, 0x0132,
+	    "rsp: %p rsp_ring pointer %p rsp len 0x%x "
+	    "rsp off 0x%x, rsp->dma: 0x%llx\n",
+	    rsp, rsp->ring, rsp->length,
+	    ha->rsp_que_off, (u64)rsp->dma);
+
+	return QLA_SUCCESS;
+}
+
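+/*
+ * Poll AEN mailbox 0 until the firmware reports a completed restart (or a
+ * config-wait state via Get FW State), performing a soft reset or a driver
+ * shutdown request along the way when the firmware signals an error.
+ */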
+static int
+qlafx00_init_fw_ready(scsi_qla_host_t *vha)
+{
+	int rval = 0;
+	unsigned long wtime;
+	uint16_t wait_time;	/* Wait time */
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t aenmbx, aenmbx7 = 0;
+	uint32_t state[5];
+	bool done = false;
+
+	/* 30 seconds wait - Adjust if required */
+	wait_time = 30;
+
+	/* wait time before firmware ready */
+	wtime = jiffies + (wait_time * HZ);
+	do {
+		aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
+		barrier();
+		ql_dbg(ql_dbg_mbx, vha, 0x0133,
+		    "aenmbx: 0x%x\n", aenmbx);
+
+		switch (aenmbx) {
+		case MBA_FW_NOT_STARTED:
+		case MBA_FW_STARTING:
+			break;
+
+		case MBA_SYSTEM_ERR:
+		case MBA_REQ_TRANSFER_ERR:
+		case MBA_RSP_TRANSFER_ERR:
+		case MBA_FW_INIT_FAILURE:
+			qlafx00_soft_reset(vha);
+			break;
+
+		case MBA_FW_RESTART_CMPLT:
+			/* Set the mbx and rqstq intr code */
+			aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+			ha->mbx_intr_code = MSW(aenmbx7);
+			ha->rqstq_intr_code = LSW(aenmbx7);
+			ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+			ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+			ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+			ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+			WRT_REG_DWORD(&reg->aenmailbox0, 0);
+			RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
+			ql_dbg(ql_dbg_init, vha, 0x0134,
+			    "f/w returned mbx_intr_code: 0x%x, "
+			    "rqstq_intr_code: 0x%x\n",
+			    ha->mbx_intr_code, ha->rqstq_intr_code);
+			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+			rval = QLA_SUCCESS;
+			done = true;
+			break;
+
+		default:
+			/* The fw is apparently not ready. In order to
+			 * continue, we might need to issue a Mbox cmd, but the
+			 * problem is that the DoorBell vector values that come
+			 * with the 8060 AEN are most likely gone by now (and
+			 * thus no bell would be rung on the fw side when the
+			 * mbox cmd is issued). We therefore have to grab the
+			 * 8060 AEN shadow regs (filled in by FW when the last
+			 * 8060 AEN was being posted).
+			 * Do the following to determine what is needed in
+			 * order to get the FW ready:
+			 * 1. reload the 8060 AEN values from the shadow regs
+			 * 2. clear int status to get rid of possible pending
+			 *    interrupts
+			 * 3. issue Get FW State Mbox cmd to determine fw state
+			 * Set the mbx and rqstq intr code from Shadow Regs.
+			 */
+			aenmbx7 = RD_REG_DWORD(&reg->initval7);
+			ha->mbx_intr_code = MSW(aenmbx7);
+			ha->rqstq_intr_code = LSW(aenmbx7);
+			ha->req_que_off = RD_REG_DWORD(&reg->initval1);
+			ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
+			ha->req_que_len = RD_REG_DWORD(&reg->initval5);
+			ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
+			ql_dbg(ql_dbg_init, vha, 0x0135,
+			    "f/w returned mbx_intr_code: 0x%x, "
+			    "rqstq_intr_code: 0x%x\n",
+			    ha->mbx_intr_code, ha->rqstq_intr_code);
+			QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+			/* Get the FW state */
+			rval = qlafx00_get_firmware_state(vha, state);
+			if (rval != QLA_SUCCESS) {
+				/* Retry if timer has not expired */
+				break;
+			}
+
+			if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
+				/* Firmware is waiting to be
+				 * initialized by driver
+				 */
+				rval = QLA_SUCCESS;
+				done = true;
+				break;
+			}
+
+			/* Issue driver shutdown and wait until f/w recovers.
+			 * Driver should continue to poll until 8060 AEN is
+			 * received indicating firmware recovery.
+			 */
+			ql_dbg(ql_dbg_init, vha, 0x0136,
+			    "Sending Driver shutdown fw_state 0x%x\n",
+			    state[0]);
+
+			rval = qlafx00_driver_shutdown(vha, 10);
+			if (rval != QLA_SUCCESS) {
+				rval = QLA_FUNCTION_FAILED;
+				break;
+			}
+			msleep(500);
+
+			wtime = jiffies + (wait_time * HZ);
+			break;
+		}
+
+		if (!done) {
+			if (time_after_eq(jiffies, wtime)) {
+				ql_dbg(ql_dbg_init, vha, 0x0137,
+				    "Init f/w failed: aen[7]: 0x%x\n",
+				    RD_REG_DWORD(&reg->aenmailbox7));
+				rval = QLA_FUNCTION_FAILED;
+				done = true;
+				break;
+			}
+			/* Delay for a while */
+			msleep(500);
+		}
+	} while (!done);
+
+	if (rval)
+		ql_dbg(ql_dbg_init, vha, 0x0138,
+		    "%s **** FAILED ****.\n", __func__);
+	else
+		ql_dbg(ql_dbg_init, vha, 0x0139,
+		    "%s **** SUCCESS ****.\n", __func__);
+
+	return rval;
+}
+
+/*
+ * qlafx00_fw_ready() - Waits for firmware ready.
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+int
+qlafx00_fw_ready(scsi_qla_host_t *vha)
+{
+	int		rval;
+	unsigned long	wtime;
+	uint16_t	wait_time;	/* Wait time if loop is coming ready */
+	uint32_t	state[5];
+
+	rval = QLA_SUCCESS;
+
+	wait_time = 10;
+
+	/* wait time before firmware ready */
+	wtime = jiffies + (wait_time * HZ);
+
+	/* Wait for ISP to finish init */
+	if (!vha->flags.init_done)
+		ql_dbg(ql_dbg_init, vha, 0x013a,
+		    "Waiting for init to complete...\n");
+
+	do {
+		rval = qlafx00_get_firmware_state(vha, state);
+
+		if (rval == QLA_SUCCESS) {
+			if (state[0] == FSTATE_FX00_INITIALIZED) {
+				ql_dbg(ql_dbg_init, vha, 0x013b,
+				    "fw_state=%x\n", state[0]);
+				rval = QLA_SUCCESS;
+				break;
+			}
+		}
+		rval = QLA_FUNCTION_FAILED;
+
+		if (time_after_eq(jiffies, wtime))
+			break;
+
+		/* Delay for a while */
+		msleep(500);
+
+		ql_dbg(ql_dbg_init, vha, 0x013c,
+		    "fw_state=%x curr time=%lx.\n", state[0], jiffies);
+	} while (1);
+
+	if (rval)
+		ql_dbg(ql_dbg_init, vha, 0x013d,
+		    "Firmware ready **** FAILED ****.\n");
+	else
+		ql_dbg(ql_dbg_init, vha, 0x013e,
+		    "Firmware ready **** SUCCESS ****.\n");
+
+	return rval;
+}
+
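+/*
+ * Walk the target-node bitmap returned by the firmware (ha->gid_list),
+ * fetch node info for every set bit and reconcile the result against the
+ * existing vp_fcports list, queueing genuinely new ports on new_fcports.
+ */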
+static int
+qlafx00_find_all_targets(scsi_qla_host_t *vha,
+	struct list_head *new_fcports)
+{
+	int		rval;
+	uint16_t	tgt_id;
+	fc_port_t	*fcport, *new_fcport;
+	int		found;
+	struct qla_hw_data *ha = vha->hw;
+
+	rval = QLA_SUCCESS;
+
+	if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
+		return QLA_FUNCTION_FAILED;
+
+	if ((atomic_read(&vha->loop_down_timer) ||
+	     STATE_TRANSITION(vha))) {
+		atomic_set(&vha->loop_down_timer, 0);
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		return QLA_FUNCTION_FAILED;
+	}
+
+	ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
+	    "Listing Target bit map...\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
+	    0x2089, (uint8_t *)ha->gid_list, 32);
+
+	/* Allocate temporary rmtport for any new rmtports discovered. */
+	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (new_fcport == NULL)
+		return QLA_MEMORY_ALLOC_FAILED;
+
+	for_each_set_bit(tgt_id, (void *)ha->gid_list,
+	    QLAFX00_TGT_NODE_LIST_SIZE) {
+
+		/* Send get target node info */
+		new_fcport->tgt_id = tgt_id;
+		rval = qlafx00_fx_disc(vha, new_fcport,
+		    FXDISC_GET_TGT_NODE_INFO);
+		if (rval != QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x208a,
+			    "Target info scan failed -- assuming zero-entry "
+			    "result...\n");
+			continue;
+		}
+
+		/* Locate matching device in database. */
+		found = 0;
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (memcmp(new_fcport->port_name,
+			    fcport->port_name, WWN_SIZE))
+				continue;
+
+			found++;
+
+			/*
+			 * If tgt_id is same and state FCS_ONLINE, nothing
+			 * changed.
+			 */
+			if (fcport->tgt_id == new_fcport->tgt_id &&
+			    atomic_read(&fcport->state) == FCS_ONLINE)
+				break;
+
+			/*
+			 * Tgt ID changed or device was marked to be updated.
+			 */
+			ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
+			    "TGT-ID Change(%s): Present tgt id: "
+			    "0x%x state: 0x%x "
+			    "wwnn = %llx wwpn = %llx.\n",
+			    __func__, fcport->tgt_id,
+			    atomic_read(&fcport->state),
+			    (unsigned long long)wwn_to_u64(fcport->node_name),
+			    (unsigned long long)wwn_to_u64(fcport->port_name));
+
+			ql_log(ql_log_info, vha, 0x208c,
+			    "TGT-ID Announce(%s): Discovered tgt "
+			    "id 0x%x wwnn = %llx "
+			    "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
+			    (unsigned long long)
+			    wwn_to_u64(new_fcport->node_name),
+			    (unsigned long long)
+			    wwn_to_u64(new_fcport->port_name));
+
+			if (atomic_read(&fcport->state) != FCS_ONLINE) {
+				fcport->old_tgt_id = fcport->tgt_id;
+				fcport->tgt_id = new_fcport->tgt_id;
+				ql_log(ql_log_info, vha, 0x208d,
+				   "TGT-ID: New fcport Added: %p\n", fcport);
+				qla2x00_update_fcport(vha, fcport);
+			} else {
+				ql_log(ql_log_info, vha, 0x208e,
+				    "Existing TGT-ID %x did not get "
+				    "offline event from firmware.\n",
+				    fcport->old_tgt_id);
+				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				kfree(new_fcport);
+				return rval;
+			}
+			break;
+		}
+
+		if (found)
+			continue;
+
+		/* If device was not in our fcports list, then add it. */
+		list_add_tail(&new_fcport->list, new_fcports);
+
+		/* Allocate a new replacement fcport. */
+		new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+		if (new_fcport == NULL)
+			return QLA_MEMORY_ALLOC_FAILED;
+	}
+
+	kfree(new_fcport);
+	return rval;
+}
+
+/*
+ * qlafx00_configure_all_targets
+ *      Setup target devices with node ID's.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      BIT_0 = error
+ */
+static int
+qlafx00_configure_all_targets(scsi_qla_host_t *vha)
+{
+	int rval;
+	fc_port_t *fcport, *rmptemp;
+	LIST_HEAD(new_fcports);
+
+	rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
+	    FXDISC_GET_TGT_NODE_LIST);
+	if (rval != QLA_SUCCESS) {
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		return rval;
+	}
+
+	rval = qlafx00_find_all_targets(vha, &new_fcports);
+	if (rval != QLA_SUCCESS) {
+		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+		return rval;
+	}
+
+	/*
+	 * Delete all previous devices marked lost.
+	 */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+			break;
+
+		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
+			if (fcport->port_type != FCT_INITIATOR)
+				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+		}
+	}
+
+	/*
+	 * Add the new devices to our devices list.
+	 */
+	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+			break;
+
+		qla2x00_update_fcport(vha, fcport);
+		list_move_tail(&fcport->list, &vha->vp_fcports);
+		ql_log(ql_log_info, vha, 0x208f,
+		    "Attach new target id 0x%x wwnn = %llx "
+		    "wwpn = %llx.\n",
+		    fcport->tgt_id,
+		    (unsigned long long)wwn_to_u64(fcport->node_name),
+		    (unsigned long long)wwn_to_u64(fcport->port_name));
+	}
+
+	/* Free all new device structures not processed. */
+	list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
+		list_del(&fcport->list);
+		kfree(fcport);
+	}
+
+	return rval;
+}
+
+/*
+ * qlafx00_configure_devices
+ *      Updates Fibre Channel Device Database with what is actually on loop.
+ *
+ * Input:
+ *      ha                = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success.
+ *      1 = error.
+ *      2 = database was full and device was not configured.
+ */
+int
+qlafx00_configure_devices(scsi_qla_host_t *vha)
+{
+	int rval;
+	unsigned long flags, save_flags;
+
+	rval = QLA_SUCCESS;
+	save_flags = flags = vha->dpc_flags;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2090,
+	    "Configure devices -- dpc flags =0x%lx\n", flags);
+
+	rval = qlafx00_configure_all_targets(vha);
+
+	if (rval == QLA_SUCCESS) {
+		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
+			rval = QLA_FUNCTION_FAILED;
+		} else {
+			atomic_set(&vha->loop_state, LOOP_READY);
+			ql_log(ql_log_info, vha, 0x2091,
+			    "Device Ready\n");
+		}
+	}
+
+	if (rval) {
+		ql_dbg(ql_dbg_disc, vha, 0x2092,
+		    "%s *** FAILED ***.\n", __func__);
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2093,
+		    "%s: exiting normally.\n", __func__);
+	}
+	return rval;
+}
+
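+/*
+ * Common ISP error-recovery cleanup: reset the chip, mark ports lost, fail
+ * outstanding commands and flag FX00_RESET_RECOVERY so the timer routine
+ * can drive the rest of the recovery.
+ */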
+static void
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	fc_port_t *fcport;
+
+	vha->flags.online = 0;
+	ha->flags.chip_reset_done = 0;
+	ha->mr.fw_hbt_en = 0;
+	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	vha->qla_stats.total_isp_aborts++;
+
+	ql_log(ql_log_info, vha, 0x013f,
+	    "Performing ISP error recovery - ha = %p.\n", ha);
+
+	ha->isp_ops->reset_chip(vha);
+
+	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+		atomic_set(&vha->loop_down_timer,
+		    QLAFX00_LOOP_DOWN_TIME);
+	} else {
+		if (!atomic_read(&vha->loop_down_timer))
+			atomic_set(&vha->loop_down_timer,
+			    QLAFX00_LOOP_DOWN_TIME);
+	}
+
+	/* Clear all async request states across all VPs. */
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		fcport->flags = 0;
+		if (atomic_read(&fcport->state) == FCS_ONLINE)
+			qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+	}
+
+	if (!ha->flags.eeh_busy) {
+		/* Requeue all commands in outstanding command list. */
+		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+	}
+
+	qla2x00_free_irqs(vha);
+	set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+
+	/* Clear the Interrupts */
+	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+	ql_log(ql_log_info, vha, 0x0140,
+	    "%s Done - ha=%p.\n", __func__, ha);
+}
+
+/**
+ * qlafx00_init_response_q_entries() - Initializes response queue entries.
+ * @rsp: response queue
+ *
+ * Marks every entry in the response ring as already processed.
+ */
+void
+qlafx00_init_response_q_entries(struct rsp_que *rsp)
+{
+	uint16_t cnt;
+	response_t *pkt;
+
+	rsp->ring_ptr = rsp->ring;
+	rsp->ring_index    = 0;
+	rsp->status_srb = NULL;
+	pkt = rsp->ring_ptr;
+	for (cnt = 0; cnt < rsp->length; cnt++) {
+		pkt->signature = RESPONSE_PROCESSED;
+		WRT_REG_DWORD(&pkt->signature, RESPONSE_PROCESSED);
+		pkt++;
+	}
+}
+
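+/*
+ * After a firmware restart, re-acquire IRQs, reload the interrupt codes and
+ * queue offsets/lengths from the AEN mailboxes, and re-initialize the rings
+ * before registering host info with the firmware again.
+ */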
+int
+qlafx00_rescan_isp(scsi_qla_host_t *vha)
+{
+	uint32_t status = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+	uint32_t aenmbx7;
+
+	qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
+
+	aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
+	ha->mbx_intr_code = MSW(aenmbx7);
+	ha->rqstq_intr_code = LSW(aenmbx7);
+	ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
+	ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
+	ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
+	ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
+
+	ql_dbg(ql_dbg_disc, vha, 0x2094,
+	    "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
+	    " Req que offset 0x%x Rsp que offset 0x%x\n",
+	    ha->mbx_intr_code, ha->rqstq_intr_code,
+	    ha->req_que_off, ha->rsp_que_off);
+
+	/* Clear the Interrupts */
+	QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
+
+	status = qla2x00_init_rings(vha);
+	if (!status) {
+		vha->flags.online = 1;
+
+		/* if no cable then assume it's good */
+		if ((vha->device_flags & DFLG_NO_CABLE))
+			status = 0;
+		/* Register system information */
+		if (qlafx00_fx_disc(vha,
+		    &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
+			ql_dbg(ql_dbg_disc, vha, 0x2095,
+			    "failed to register host info\n");
+	}
+	scsi_unblock_requests(vha->host);
+	return status;
+}
+
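+/*
+ * Periodic timer work: monitor the firmware heartbeat register and schedule
+ * an ISP abort when too many beats are missed; while reset recovery is in
+ * progress, poll AEN mailbox 0 to drive the recovery state machine.
+ */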
+void
+qlafx00_timer_routine(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t fw_heart_beat;
+	uint32_t aenmbx0;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+	/* Check firmware health */
+	if (ha->mr.fw_hbt_cnt)
+		ha->mr.fw_hbt_cnt--;
+	else {
+		if ((!ha->flags.mr_reset_hdlr_active) &&
+		    (!test_bit(UNLOADING, &vha->dpc_flags)) &&
+		    (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
+		    (ha->mr.fw_hbt_en)) {
+			fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
+			if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
+				ha->mr.old_fw_hbt_cnt = fw_heart_beat;
+				ha->mr.fw_hbt_miss_cnt = 0;
+			} else {
+				ha->mr.fw_hbt_miss_cnt++;
+				if (ha->mr.fw_hbt_miss_cnt ==
+				    QLAFX00_HEARTBEAT_MISS_CNT) {
+					set_bit(ISP_ABORT_NEEDED,
+					    &vha->dpc_flags);
+					qla2xxx_wake_dpc(vha);
+					ha->mr.fw_hbt_miss_cnt = 0;
+				}
+			}
+		}
+		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+	}
+
+	if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
+		/* Reset recovery to be performed in timer routine */
+		aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
+		if (ha->mr.fw_reset_timer_exp) {
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			ha->mr.fw_reset_timer_exp = 0;
+		} else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
+			/* Wake up DPC to rescan the targets */
+			set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
+			clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		} else if ((aenmbx0 == MBA_FW_STARTING) &&
+		    (!ha->mr.fw_hbt_en)) {
+			ha->mr.fw_hbt_en = 1;
+		} else if (!ha->mr.fw_reset_timer_tick) {
+			if (aenmbx0 == ha->mr.old_aenmbx0_state)
+				ha->mr.fw_reset_timer_exp = 1;
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		} else if (aenmbx0 == 0xFFFFFFFF) {
+			uint32_t data0, data1;
+
+			data0 = QLAFX00_RD_REG(ha,
+			    QLAFX00_BAR1_BASE_ADDR_REG);
+			data1 = QLAFX00_RD_REG(ha,
+			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
+
+			data0 &= 0xffff0000;
+			data1 &= 0x0000ffff;
+
+			QLAFX00_WR_REG(ha,
+			    QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
+			    (data0 | data1));
+		} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
+			ha->mr.fw_reset_timer_tick =
+			    QLAFX00_MAX_RESET_INTERVAL;
+		}
+		ha->mr.old_aenmbx0_state = aenmbx0;
+		ha->mr.fw_reset_timer_tick--;
+	}
+}
+
+/*
+ *  qlafx00_reset_initialize
+ *      Re-initialize after an iSA device reset.
+ *
+ * Input:
+ *      ha  = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_reset_initialize(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->device_flags & DFLG_DEV_FAILED) {
+		ql_dbg(ql_dbg_init, vha, 0x0142,
+		    "Device in failed state\n");
+		return QLA_SUCCESS;
+	}
+
+	ha->flags.mr_reset_hdlr_active = 1;
+
+	if (vha->flags.online) {
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha);
+	}
+
+	ql_log(ql_log_info, vha, 0x0143,
+	    "(%s): succeeded.\n", __func__);
+	ha->flags.mr_reset_hdlr_active = 0;
+	return QLA_SUCCESS;
+}
+
+/*
+ *  qlafx00_abort_isp
+ *      Resets ISP and aborts all outstanding commands.
+ *
+ * Input:
+ *      ha  = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_abort_isp(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+
+	if (vha->flags.online) {
+		if (unlikely(pci_channel_offline(ha->pdev) &&
+		    ha->flags.pci_channel_io_perm_failure)) {
+			clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+			return QLA_SUCCESS;
+		}
+
+		scsi_block_requests(vha->host);
+		qlafx00_abort_isp_cleanup(vha);
+	}
+
+	ql_log(ql_log_info, vha, 0x0145,
+	    "(%s): succeeded.\n", __func__);
+
+	return QLA_SUCCESS;
+}
+
+static inline fc_port_t*
+qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
+{
+	fc_port_t	*fcport;
+
+	/* Check for matching device in remote port list. */
+	fcport = NULL;
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->tgt_id == tgt_id) {
+			ql_dbg(ql_dbg_async, vha, 0x5072,
+			    "Matching fcport(%p) found with TGT-ID: 0x%x "
+			    "and Remote TGT_ID: 0x%x\n",
+			    fcport, fcport->tgt_id, tgt_id);
+			break;
+		}
+	}
+	return fcport;
+}
+
+static void
+qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
+{
+	fc_port_t	*fcport;
+
+	ql_log(ql_log_info, vha, 0x5073,
+	    "Detach TGT-ID: 0x%x\n", tgt_id);
+
+	fcport = qlafx00_get_fcport(vha, tgt_id);
+	if (!fcport)
+		return;
+
+	qla2x00_mark_device_lost(vha, fcport, 0, 0);
+}
+
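+/*
+ * Translate ISPFx00 asynchronous events into FC transport events and kick
+ * the DPC thread for port-database updates.
+ */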
+int
+qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
+{
+	int rval = 0;
+	uint32_t aen_code, aen_data;
+
+	aen_code = FCH_EVT_VENDOR_UNIQUE;
+	aen_data = evt->u.aenfx.evtcode;
+
+	switch (evt->u.aenfx.evtcode) {
+	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
+		if (evt->u.aenfx.mbx[1] == 0) {
+			if (evt->u.aenfx.mbx[2] == 1) {
+				if (!vha->flags.fw_tgt_reported)
+					vha->flags.fw_tgt_reported = 1;
+				atomic_set(&vha->loop_down_timer, 0);
+				atomic_set(&vha->loop_state, LOOP_UP);
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+				qla2xxx_wake_dpc(vha);
+			} else if (evt->u.aenfx.mbx[2] == 2) {
+				qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
+			}
+		} else if (evt->u.aenfx.mbx[1] == 0xffff) {
+			if (evt->u.aenfx.mbx[2] == 1) {
+				if (!vha->flags.fw_tgt_reported)
+					vha->flags.fw_tgt_reported = 1;
+				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			} else if (evt->u.aenfx.mbx[2] == 2) {
+				vha->device_flags |= DFLG_NO_CABLE;
+				qla2x00_mark_all_devices_lost(vha, 1);
+			}
+		}
+		break;
+	case QLAFX00_MBA_LINK_UP:
+		aen_code = FCH_EVT_LINKUP;
+		aen_data = 0;
+		break;
+	case QLAFX00_MBA_LINK_DOWN:
+		aen_code = FCH_EVT_LINKDOWN;
+		aen_data = 0;
+		break;
+	}
+
+	fc_host_post_event(vha->host, fc_get_event_number(),
+	    aen_code, aen_data);
+
+	return rval;
+}
+
+static void
+qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
+{
+	u64 port_name = 0, node_name = 0;
+
+	port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
+	node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
+
+	fc_host_node_name(vha->host) = node_name;
+	fc_host_port_name(vha->host) = port_name;
+	if (!pinfo->port_type)
+		vha->hw->current_topology = ISP_CFG_F;
+	if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
+		atomic_set(&vha->loop_state, LOOP_READY);
+	else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
+		atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
+}
+
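+/* Completion callbacks for the FX_DISC IOCB path used by qlafx00_fx_disc(). */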
+static void
+qla2x00_fxdisc_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
+static void
+qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *lio = &sp->u.iocb_cmd;
+
+	complete(&lio->u.fxiocb.fxiocb_comp);
+}
+
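+/*
+ * Issue an FX_DISC (SRB_FXIOCB_DCMD) request to the firmware.  Depending on
+ * fx_type this fetches config/port/target-node information into a coherent
+ * DMA buffer or registers host system information with the firmware; the
+ * results are copied into the driver's state once the IOCB completes.
+ */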
+int
+qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t fx_type)
+{
+	srb_t *sp;
+	struct srb_iocb *fdisc;
+	int rval = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
+	struct host_system_info *phost_info;
+	struct register_host_info *preg_hsi;
+	struct new_utsname *p_sysid = NULL;
+	struct timeval tv;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	fdisc = &sp->u.iocb_cmd;
+	switch (fx_type) {
+	case FXDISC_GET_CONFIG_INFO:
+		fdisc->u.fxiocb.flags =
+		    SRB_FXDISC_RESP_DMA_VALID;
+		fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
+		break;
+	case FXDISC_GET_PORT_INFO:
+		fdisc->u.fxiocb.flags =
+		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+		fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
+		fdisc->u.fxiocb.req_data = fcport->port_id;
+		break;
+	case FXDISC_GET_TGT_NODE_INFO:
+		fdisc->u.fxiocb.flags =
+		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
+		fdisc->u.fxiocb.req_data = fcport->tgt_id;
+		break;
+	case FXDISC_GET_TGT_NODE_LIST:
+		fdisc->u.fxiocb.flags =
+		    SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
+		fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
+		break;
+	case FXDISC_REG_HOST_INFO:
+		fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
+		fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
+		p_sysid = utsname();
+		if (!p_sysid) {
+			ql_log(ql_log_warn, vha, 0x303c,
+			    "Unable to get the system information\n");
+			goto done_free_sp;
+		}
+		break;
+	default:
+		break;
+	}
+
+	if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+		fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
+		    fdisc->u.fxiocb.req_len,
+		    &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
+		if (!fdisc->u.fxiocb.req_addr)
+			goto done_free_sp;
+
+		if (fx_type == FXDISC_REG_HOST_INFO) {
+			preg_hsi = (struct register_host_info *)
+				fdisc->u.fxiocb.req_addr;
+			phost_info = &preg_hsi->hsi;
+			memset(preg_hsi, 0, sizeof(struct register_host_info));
+			phost_info->os_type = OS_TYPE_LINUX;
+			strncpy(phost_info->sysname,
+			    p_sysid->sysname, SYSNAME_LENGTH);
+			strncpy(phost_info->nodename,
+			    p_sysid->nodename, NODENAME_LENGTH);
+			strncpy(phost_info->release,
+			    p_sysid->release, RELEASE_LENGTH);
+			strncpy(phost_info->version,
+			    p_sysid->version, VERSION_LENGTH);
+			strncpy(phost_info->machine,
+			    p_sysid->machine, MACHINE_LENGTH);
+			strncpy(phost_info->domainname,
+			    p_sysid->domainname, DOMNAME_LENGTH);
+			strncpy(phost_info->hostdriver,
+			    QLA2XXX_VERSION, VERSION_LENGTH);
+			do_gettimeofday(&tv);
+			preg_hsi->utc = (uint64_t)tv.tv_sec;
+			ql_dbg(ql_dbg_init, vha, 0x0149,
+			    "ISP%04X: Host registration with firmware\n",
+			    ha->pdev->device);
+			ql_dbg(ql_dbg_init, vha, 0x014a,
+			    "os_type = '%d', sysname = '%s', nodename = '%s'\n",
+			    phost_info->os_type,
+			    phost_info->sysname,
+			    phost_info->nodename);
+			ql_dbg(ql_dbg_init, vha, 0x014b,
+			    "release = '%s', version = '%s'\n",
+			    phost_info->release,
+			    phost_info->version);
+			ql_dbg(ql_dbg_init, vha, 0x014c,
+			    "machine = '%s' "
+			    "domainname = '%s', hostdriver = '%s'\n",
+			    phost_info->machine,
+			    phost_info->domainname,
+			    phost_info->hostdriver);
+			ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
+			    (uint8_t *)phost_info,
+			    sizeof(struct host_system_info));
+		}
+	}
+
+	if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+		fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
+		    fdisc->u.fxiocb.rsp_len,
+		    &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
+		if (!fdisc->u.fxiocb.rsp_addr)
+			goto done_unmap_req;
+	}
+
+	sp->type = SRB_FXIOCB_DCMD;
+	sp->name = "fxdisc";
+	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+	fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
+	fdisc->u.fxiocb.req_func_type = fx_type;
+	sp->done = qla2x00_fxdisc_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_unmap_dma;
+
+	wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
+
+	if (fx_type == FXDISC_GET_CONFIG_INFO) {
+		struct config_info_data *pinfo =
+		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
+		memcpy(&vha->hw->mr.product_name, pinfo->product_name,
+		    sizeof(vha->hw->mr.product_name));
+		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
+		    sizeof(vha->hw->mr.symbolic_name));
+		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
+		    sizeof(vha->hw->mr.serial_num));
+		memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
+		    sizeof(vha->hw->mr.hw_version));
+		memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
+		    sizeof(vha->hw->mr.fw_version));
+		strim(vha->hw->mr.fw_version);
+		memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
+		    sizeof(vha->hw->mr.uboot_version));
+		memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
+		    sizeof(vha->hw->mr.fru_serial_num));
+	} else if (fx_type == FXDISC_GET_PORT_INFO) {
+		struct port_info_data *pinfo =
+		    (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
+		memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
+		memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
+		vha->d_id.b.domain = pinfo->port_id[0];
+		vha->d_id.b.area = pinfo->port_id[1];
+		vha->d_id.b.al_pa = pinfo->port_id[2];
+		qlafx00_update_host_attr(vha, pinfo);
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
+		    (uint8_t *)pinfo, 16);
+	} else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
+		struct qlafx00_tgt_node_info *pinfo =
+		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+		memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
+		memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
+		fcport->port_type = FCT_TARGET;
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
+		    (uint8_t *)pinfo, 16);
+	} else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
+		struct qlafx00_tgt_node_info *pinfo =
+		    (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
+		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
+		    (uint8_t *)pinfo, 16);
+		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
+	}
+	rval = fdisc->u.fxiocb.result;
+
+done_unmap_dma:
+	if (fdisc->u.fxiocb.rsp_addr)
+		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
+		    fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
+
+done_unmap_req:
+	if (fdisc->u.fxiocb.req_addr)
+		dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
+		    fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
+done_free_sp:
+	sp->free(vha, sp);
+done:
+	return rval;
+}
+
+static void
+qlafx00_abort_iocb_timeout(void *data)
+{
+	srb_t *sp = (srb_t *)data;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	abt->u.abt.comp_status = CS_TIMEOUT;
+	complete(&abt->u.abt.comp);
+}
+
+static void
+qlafx00_abort_sp_done(void *data, void *ptr, int res)
+{
+	srb_t *sp = (srb_t *)ptr;
+	struct srb_iocb *abt = &sp->u.iocb_cmd;
+
+	complete(&abt->u.abt.comp);
+}
+
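+/*
+ * Build an SRB_ABT_CMD IOCB that references the handle of the command to be
+ * aborted and wait for its completion status.
+ */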
+static int
+qlafx00_async_abt_cmd(srb_t *cmd_sp)
+{
+	scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+	fc_port_t *fcport = cmd_sp->fcport;
+	struct srb_iocb *abt_iocb;
+	srb_t *sp;
+	int rval = QLA_FUNCTION_FAILED;
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp)
+		goto done;
+
+	abt_iocb = &sp->u.iocb_cmd;
+	sp->type = SRB_ABT_CMD;
+	sp->name = "abort";
+	qla2x00_init_timer(sp, FXDISC_TIMEOUT);
+	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
+	sp->done = qlafx00_abort_sp_done;
+	abt_iocb->timeout = qlafx00_abort_iocb_timeout;
+	init_completion(&abt_iocb->u.abt.comp);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	ql_dbg(ql_dbg_async, vha, 0x507c,
+	    "Abort command issued - hdl=%x, target_id=%x\n",
+	    cmd_sp->handle, fcport->tgt_id);
+
+	wait_for_completion(&abt_iocb->u.abt.comp);
+
+	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
+	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+
+done_free_sp:
+	sp->free(vha, sp);
+done:
+	return rval;
+}
+
+int
+qlafx00_abort_command(srb_t *sp)
+{
+	unsigned long   flags = 0;
+
+	uint32_t	handle;
+	fc_port_t	*fcport = sp->fcport;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = vha->req;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
+		if (req->outstanding_cmds[handle] == sp)
+			break;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
+		/* Command not found. */
+		return QLA_FUNCTION_FAILED;
+	}
+	return qlafx00_async_abt_cmd(sp);
+}
+
+/*
+ * qlafx00_initialize_adapter
+ *      Initialize board.
+ *
+ * Input:
+ *      ha = adapter block pointer.
+ *
+ * Returns:
+ *      0 = success
+ */
+int
+qlafx00_initialize_adapter(scsi_qla_host_t *vha)
+{
+	int	rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	/* Clear adapter flags. */
+	vha->flags.online = 0;
+	ha->flags.chip_reset_done = 0;
+	vha->flags.reset_active = 0;
+	ha->flags.pci_channel_io_perm_failure = 0;
+	ha->flags.eeh_busy = 0;
+	ha->thermal_support = 0;
+	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+	atomic_set(&vha->loop_state, LOOP_DOWN);
+	vha->device_flags = DFLG_NO_CABLE;
+	vha->dpc_flags = 0;
+	vha->flags.management_server_logged_in = 0;
+	vha->marker_needed = 0;
+	ha->isp_abort_cnt = 0;
+	ha->beacon_blink_led = 0;
+
+	set_bit(0, ha->req_qid_map);
+	set_bit(0, ha->rsp_qid_map);
+
+	ql_dbg(ql_dbg_init, vha, 0x0147,
+	    "Configuring PCI space...\n");
+
+	rval = ha->isp_ops->pci_config(vha);
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0148,
+		    "Unable to configure PCI space.\n");
+		return rval;
+	}
+
+	rval = qlafx00_init_fw_ready(vha);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	qlafx00_save_queue_ptrs(vha);
+
+	rval = qlafx00_config_queues(vha);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	/*
+	 * Allocate the array of outstanding commands
+	 * now that we know the firmware resources.
+	 */
+	rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
+	if (rval != QLA_SUCCESS)
+		return rval;
+
+	rval = qla2x00_init_rings(vha);
+	ha->flags.chip_reset_done = 1;
+
+	return rval;
+}
+
+uint32_t
+qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
+		      char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	int rval = QLA_FUNCTION_FAILED;
+	uint32_t state[1];
+
+	if (qla2x00_reset_active(vha))
+		ql_log(ql_log_warn, vha, 0x70ce,
+		    "ISP reset active.\n");
+	else if (!vha->hw->flags.eeh_busy) {
+		rval = qlafx00_get_firmware_state(vha, state);
+	}
+	if (rval != QLA_SUCCESS)
+		memset(state, -1, sizeof(state));
+
+	return state[0];
+}
+
+void
+qlafx00_get_host_speed(struct Scsi_Host *shost)
+{
+	struct qla_hw_data *ha = ((struct scsi_qla_host *)
+					(shost_priv(shost)))->hw;
+	u32 speed = FC_PORTSPEED_UNKNOWN;
+
+	switch (ha->link_data_rate) {
+	case QLAFX00_PORT_SPEED_2G:
+		speed = FC_PORTSPEED_2GBIT;
+		break;
+	case QLAFX00_PORT_SPEED_4G:
+		speed = FC_PORTSPEED_4GBIT;
+		break;
+	case QLAFX00_PORT_SPEED_8G:
+		speed = FC_PORTSPEED_8GBIT;
+		break;
+	case QLAFX00_PORT_SPEED_10G:
+		speed = FC_PORTSPEED_10GBIT;
+		break;
+	}
+	fc_host_speed(shost) = speed;
+}
+
+/* QLAFX00 specific ISR implementation functions */
+
+static inline void
+qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
+		     uint32_t sense_len, struct rsp_que *rsp, int res)
+{
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct scsi_cmnd *cp = GET_CMD_SP(sp);
+	uint32_t track_sense_len;
+
+	SET_FW_SENSE_LEN(sp, sense_len);
+
+	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
+		sense_len = SCSI_SENSE_BUFFERSIZE;
+
+	SET_CMD_SENSE_LEN(sp, sense_len);
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
+	track_sense_len = sense_len;
+
+	if (sense_len > par_sense_len)
+		sense_len = par_sense_len;
+
+	memcpy(cp->sense_buffer, sense_data, sense_len);
+
+	SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
+
+	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
+	track_sense_len -= sense_len;
+	SET_CMD_SENSE_LEN(sp, track_sense_len);
+
+	ql_dbg(ql_dbg_io, vha, 0x304d,
+	    "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
+	    sense_len, par_sense_len, track_sense_len);
+	if (GET_FW_SENSE_LEN(sp) > 0) {
+		rsp->status_srb = sp;
+		cp->result = res;
+	}
+
+	if (sense_len) {
+		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
+		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+		    cp);
+		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
+		    cp->sense_buffer, sense_len);
+	}
+}
+
+static void
+qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+		      struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
+		      uint16_t sstatus, uint16_t cpstatus)
+{
+	struct srb_iocb *tmf;
+
+	tmf = &sp->u.iocb_cmd;
+	if (cpstatus != CS_COMPLETE ||
+	    (sstatus & SS_RESPONSE_INFO_LEN_VALID))
+		cpstatus = CS_INCOMPLETE;
+	tmf->u.tmf.comp_status = cpstatus;
+	sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+			 struct abort_iocb_entry_fx00 *pkt)
+{
+	const char func[] = "ABT_IOCB";
+	srb_t *sp;
+	struct srb_iocb *abt;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	abt = &sp->u.iocb_cmd;
+	abt->u.abt.comp_status = le32_to_cpu(pkt->tgt_id_sts);
+	sp->done(vha, sp, 0);
+}
+
+static void
+qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
+			 struct ioctl_iocb_entry_fx00 *pkt)
+{
+	const char func[] = "IOSB_IOCB";
+	srb_t *sp;
+	struct fc_bsg_job *bsg_job;
+	struct srb_iocb *iocb_job;
+	int res;
+	struct qla_mt_iocb_rsp_fx00 fstatus;
+	uint8_t	*fw_sts_ptr;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	if (sp->type == SRB_FXIOCB_DCMD) {
+		iocb_job = &sp->u.iocb_cmd;
+		iocb_job->u.fxiocb.seq_number = le32_to_cpu(pkt->seq_no);
+		iocb_job->u.fxiocb.fw_flags = le32_to_cpu(pkt->fw_iotcl_flags);
+		iocb_job->u.fxiocb.result = le32_to_cpu(pkt->status);
+		if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
+			iocb_job->u.fxiocb.req_data =
+			    le32_to_cpu(pkt->dataword_r);
+	} else {
+		bsg_job = sp->u.bsg_job;
+
+		memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+		fstatus.reserved_1 = pkt->reserved_0;
+		fstatus.func_type = pkt->comp_func_num;
+		fstatus.ioctl_flags = pkt->fw_iotcl_flags;
+		fstatus.ioctl_data = pkt->dataword_r;
+		fstatus.adapid = pkt->adapid;
+		fstatus.adapid_hi = pkt->adapid_hi;
+		fstatus.reserved_2 = pkt->reserved_1;
+		fstatus.res_count = pkt->residuallen;
+		fstatus.status = pkt->status;
+		fstatus.seq_number = pkt->seq_no;
+		memcpy(fstatus.reserved_3,
+		    pkt->reserved_2, 20 * sizeof(uint8_t));
+
+		fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+		    sizeof(struct fc_bsg_reply);
+
+		memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
+		    sizeof(struct qla_mt_iocb_rsp_fx00));
+		bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+			sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
+
+		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+		    sp->fcport->vha, 0x5080,
+		    (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
+
+		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+		    sp->fcport->vha, 0x5074,
+		    (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
+
+		res = bsg_job->reply->result = DID_OK << 16;
+		bsg_job->reply->reply_payload_rcv_len =
+		    bsg_job->reply_payload.payload_len;
+	}
+	sp->done(vha, sp, res);
+}
+
+/**
+ * qlafx00_status_entry() - Process a Status IOCB entry.
+ * @ha: SCSI driver HA context
+ * @pkt: Entry pointer
+ */
+static void
+qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+{
+	srb_t		*sp;
+	fc_port_t	*fcport;
+	struct scsi_cmnd *cp;
+	struct sts_entry_fx00 *sts;
+	uint16_t	comp_status;
+	uint16_t	scsi_status;
+	uint16_t	ox_id;
+	uint8_t		lscsi_status;
+	int32_t		resid;
+	uint32_t	sense_len, par_sense_len, rsp_info_len, resid_len,
+	    fw_resid_len;
+	uint8_t		*rsp_info = NULL, *sense_data = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t hindex, handle;
+	uint16_t que;
+	struct req_que *req;
+	int logit = 1;
+	int res = 0;
+
+	sts = (struct sts_entry_fx00 *) pkt;
+
+	comp_status = le16_to_cpu(sts->comp_status);
+	scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
+	hindex = sts->handle;
+	handle = LSW(hindex);
+
+	que = MSW(hindex);
+	req = ha->req_q_map[que];
+
+	/* Validate handle. */
+	if (handle < req->num_outstanding_cmds)
+		sp = req->outstanding_cmds[handle];
+	else
+		sp = NULL;
+
+	if (sp == NULL) {
+		ql_dbg(ql_dbg_io, vha, 0x3034,
+		    "Invalid status handle (0x%x).\n", handle);
+
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		return;
+	}
+
+	if (sp->type == SRB_TM_CMD) {
+		req->outstanding_cmds[handle] = NULL;
+		qlafx00_tm_iocb_entry(vha, req, pkt, sp,
+		    scsi_status, comp_status);
+		return;
+	}
+
+	/* Fast path completion. */
+	if (comp_status == CS_COMPLETE && scsi_status == 0) {
+		qla2x00_do_host_ramp_up(vha);
+		qla2x00_process_completed_request(vha, req, handle);
+		return;
+	}
+
+	req->outstanding_cmds[handle] = NULL;
+	cp = GET_CMD_SP(sp);
+	if (cp == NULL) {
+		ql_dbg(ql_dbg_io, vha, 0x3048,
+		    "Command already returned (0x%x/%p).\n",
+		    handle, sp);
+
+		return;
+	}
+
+	lscsi_status = scsi_status & STATUS_MASK;
+
+	fcport = sp->fcport;
+
+	ox_id = 0;
+	sense_len = par_sense_len = rsp_info_len = resid_len =
+		fw_resid_len = 0;
+	if (scsi_status & SS_SENSE_LEN_VALID)
+		sense_len = le32_to_cpu(sts->sense_len);
+	if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
+		resid_len = le32_to_cpu(sts->residual_len);
+	if (comp_status == CS_DATA_UNDERRUN)
+		fw_resid_len = le32_to_cpu(sts->residual_len);
+	rsp_info = sense_data = sts->data;
+	par_sense_len = sizeof(sts->data);
+
+	/* Check for overrun. */
+	if (comp_status == CS_COMPLETE &&
+	    scsi_status & SS_RESIDUAL_OVER)
+		comp_status = CS_DATA_OVERRUN;
+
+	/*
+	 * Based on Host and scsi status generate status code for Linux
+	 */
+	switch (comp_status) {
+	case CS_COMPLETE:
+	case CS_QUEUE_FULL:
+		if (scsi_status == 0) {
+			res = DID_OK << 16;
+			break;
+		}
+		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
+			resid = resid_len;
+			scsi_set_resid(cp, resid);
+
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			     cp->underflow)) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
+				    "Mid-layer underflow "
+				    "detected (0x%x of 0x%x bytes).\n",
+				    resid, scsi_bufflen(cp));
+
+				res = DID_ERROR << 16;
+				break;
+			}
+		}
+		res = DID_OK << 16 | lscsi_status;
+
+		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
+			    "QUEUE FULL detected.\n");
+			break;
+		}
+		logit = 0;
+		if (lscsi_status != SS_CHECK_CONDITION)
+			break;
+
+		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+		if (!(scsi_status & SS_SENSE_LEN_VALID))
+			break;
+
+		qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
+		    rsp, res);
+		break;
+
+	case CS_DATA_UNDERRUN:
+		/* Use F/W calculated residual length. */
+		if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+			resid = fw_resid_len;
+		else
+			resid = resid_len;
+		scsi_set_resid(cp, resid);
+		if (scsi_status & SS_RESIDUAL_UNDER) {
+			if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+			    && fw_resid_len != resid_len) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
+				    "Dropped frame(s) detected "
+				    "(0x%x of 0x%x bytes).\n",
+				    resid, scsi_bufflen(cp));
+
+				res = DID_ERROR << 16 | lscsi_status;
+				goto check_scsi_status;
+			}
+
+			if (!lscsi_status &&
+			    ((unsigned)(scsi_bufflen(cp) - resid) <
+			    cp->underflow)) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
+				    "Mid-layer underflow "
+				    "detected (0x%x of 0x%x bytes, "
+				    "cp->underflow: 0x%x).\n",
+				    resid, scsi_bufflen(cp), cp->underflow);
+
+				res = DID_ERROR << 16;
+				break;
+			}
+		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
+			    lscsi_status != SAM_STAT_BUSY) {
+			/*
+			 * SCSI statuses of Task Set Full and Busy indicate
+			 * that the task was not completed.
+			 */
+
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
+			    "Dropped frame(s) detected (0x%x "
+			    "of 0x%x bytes).\n", resid,
+			    scsi_bufflen(cp));
+
+			res = DID_ERROR << 16 | lscsi_status;
+			goto check_scsi_status;
+		} else {
+			ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
+			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
+			    scsi_status, lscsi_status);
+		}
+
+		res = DID_OK << 16 | lscsi_status;
+		logit = 0;
+
+check_scsi_status:
+		/*
+		 * Check to see if SCSI Status is non zero. If so report SCSI
+		 * Status.
+		 */
+		if (lscsi_status != 0) {
+			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
+				ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
+				    "QUEUE FULL detected.\n");
+				logit = 1;
+				break;
+			}
+			if (lscsi_status != SS_CHECK_CONDITION)
+				break;
+
+			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+			if (!(scsi_status & SS_SENSE_LEN_VALID))
+				break;
+
+			qlafx00_handle_sense(sp, sense_data, par_sense_len,
+			    sense_len, rsp, res);
+		}
+		break;
+
+	case CS_PORT_LOGGED_OUT:
+	case CS_PORT_CONFIG_CHG:
+	case CS_PORT_BUSY:
+	case CS_INCOMPLETE:
+	case CS_PORT_UNAVAILABLE:
+	case CS_TIMEOUT:
+	case CS_RESET:
+
+		/*
+		 * We are going to have the fc class block the rport
+		 * while we try to recover so instruct the mid layer
+		 * to requeue until the class decides how to handle this.
+		 */
+		res = DID_TRANSPORT_DISRUPTED << 16;
+
+		ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
+		    "Port down status: port-state=0x%x.\n",
+		    atomic_read(&fcport->state));
+
+		if (atomic_read(&fcport->state) == FCS_ONLINE)
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+		break;
+
+	case CS_ABORTED:
+		res = DID_RESET << 16;
+		break;
+
+	default:
+		res = DID_ERROR << 16;
+		break;
+	}
+
+	if (logit)
+		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
+		    "FCP command status: 0x%x-0x%x (0x%x) "
+		    "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x "
+		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
+		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
+		    "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
+		    comp_status, scsi_status, res, vha->host_no,
+		    cp->device->id, cp->device->lun, fcport->tgt_id,
+		    lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
+		    cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
+		    cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
+		    rsp_info_len, resid_len, fw_resid_len, sense_len,
+		    par_sense_len, rsp_info_len);
+
+	if (!res)
+		qla2x00_do_host_ramp_up(vha);
+
+	if (rsp->status_srb == NULL)
+		sp->done(ha, sp, res);
+}
+
+/**
+ * qlafx00_status_cont_entry() - Process a Status Continuation entry.
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ *
+ * Extended sense data.
+ */
+static void
+qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
+{
+	uint8_t	sense_sz = 0;
+	struct qla_hw_data *ha = rsp->hw;
+	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
+	srb_t *sp = rsp->status_srb;
+	struct scsi_cmnd *cp;
+	uint32_t sense_len;
+	uint8_t *sense_ptr;
+
+	if (!sp) {
+		ql_dbg(ql_dbg_io, vha, 0x3037,
+		    "no SP, sp = %p\n", sp);
+		return;
+	}
+
+	if (!GET_FW_SENSE_LEN(sp)) {
+		ql_dbg(ql_dbg_io, vha, 0x304b,
+		    "no fw sense data, sp = %p\n", sp);
+		return;
+	}
+	cp = GET_CMD_SP(sp);
+	if (cp == NULL) {
+		ql_log(ql_log_warn, vha, 0x303b,
+		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
+
+		rsp->status_srb = NULL;
+		return;
+	}
+
+	if (!GET_CMD_SENSE_LEN(sp)) {
+		ql_dbg(ql_dbg_io, vha, 0x304c,
+		    "no sense data, sp = %p\n", sp);
+	} else {
+		sense_len = GET_CMD_SENSE_LEN(sp);
+		sense_ptr = GET_CMD_SENSE_PTR(sp);
+		ql_dbg(ql_dbg_io, vha, 0x304f,
+		    "sp=%p sense_len=0x%x sense_ptr=%p.\n",
+		    sp, sense_len, sense_ptr);
+
+		if (sense_len > sizeof(pkt->data))
+			sense_sz = sizeof(pkt->data);
+		else
+			sense_sz = sense_len;
+
+		/* Move sense data. */
+		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
+		    (uint8_t *)pkt, sizeof(sts_cont_entry_t));
+		memcpy(sense_ptr, pkt->data, sense_sz);
+		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
+		    sense_ptr, sense_sz);
+
+		sense_len -= sense_sz;
+		sense_ptr += sense_sz;
+
+		SET_CMD_SENSE_PTR(sp, sense_ptr);
+		SET_CMD_SENSE_LEN(sp, sense_len);
+	}
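+	/*
+	 * GET_FW_SENSE_LEN tracks how much sense data the firmware still
+	 * has to deliver across continuation entries, while the command
+	 * sense length/pointer track the space left in the midlayer sense
+	 * buffer.  Consume one continuation entry's worth here.
+	 */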
+	sense_len = GET_FW_SENSE_LEN(sp);
+	sense_len = (sense_len > sizeof(pkt->data)) ?
+	    (sense_len - sizeof(pkt->data)) : 0;
+	SET_FW_SENSE_LEN(sp, sense_len);
+
+	/* Place command on done queue. */
+	if (sense_len == 0) {
+		rsp->status_srb = NULL;
+		sp->done(ha, sp, cp->result);
+	}
+}
+
+/**
+ * qlafx00_multistatus_entry() - Process Multi response queue entries.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: received packet
+ */
+static void
+qlafx00_multistatus_entry(struct scsi_qla_host *vha,
+	struct rsp_que *rsp, void *pkt)
+{
+	srb_t		*sp;
+	struct multi_sts_entry_fx00 *stsmfx;
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t handle, hindex, handle_count, i;
+	uint16_t que;
+	struct req_que *req;
+	uint32_t *handle_ptr;
+
+	stsmfx = (struct multi_sts_entry_fx00 *) pkt;
+
+	handle_count = stsmfx->handle_count;
+
+	if (handle_count > MAX_HANDLE_COUNT) {
+		ql_dbg(ql_dbg_io, vha, 0x3035,
+		    "Invalid handle count (0x%x).\n", handle_count);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		return;
+	}
+
+	handle_ptr = (uint32_t *) &stsmfx->handles[0];
+
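+	/*
+	 * Each 32-bit handle packs the request queue number in the upper
+	 * 16 bits and the outstanding command index in the lower 16 bits.
+	 */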
+	for (i = 0; i < handle_count; i++) {
+		hindex = le32_to_cpu(*handle_ptr);
+		handle = LSW(hindex);
+		que = MSW(hindex);
+		req = ha->req_q_map[que];
+
+		/* Validate handle. */
+		if (handle < req->num_outstanding_cmds)
+			sp = req->outstanding_cmds[handle];
+		else
+			sp = NULL;
+
+		if (sp == NULL) {
+			ql_dbg(ql_dbg_io, vha, 0x3044,
+			    "Invalid status handle (0x%x).\n", handle);
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+			return;
+		}
+		qla2x00_process_completed_request(vha, req, handle);
+		handle_ptr++;
+	}
+}
+
+/**
+ * qlafx00_error_entry() - Process an error entry.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: Entry pointer
+ * @estatus: entry status of the errored IOCB
+ * @etype: entry type of the errored IOCB
+ */
+static void
+qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
+		    struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+{
+	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+	const char func[] = "ERROR-IOCB";
+	uint16_t que = MSW(pkt->handle);
+	struct req_que *req = NULL;
+	int res = DID_ERROR << 16;
+
+	ql_dbg(ql_dbg_async, vha, 0x507f,
+	    "type of error status in response: 0x%x\n", estatus);
+
+	req = ha->req_q_map[que];
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (sp) {
+		sp->done(ha, sp, res);
+		return;
+	}
+
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+}
+
+/**
+ * qlafx00_process_response_queue() - Process response queue entries.
+ * @vha: SCSI driver HA context
+ * @rsp: response queue to process
+ */
+static void
+qlafx00_process_response_queue(struct scsi_qla_host *vha,
+	struct rsp_que *rsp)
+{
+	struct sts_entry_fx00 *pkt;
+	response_t *lptr;
+
+	if (!vha->flags.online)
+		return;
+
+	while (RD_REG_DWORD(&(rsp->ring_ptr->signature)) !=
+	    RESPONSE_PROCESSED) {
+		lptr = rsp->ring_ptr;
+		memcpy_fromio(rsp->rsp_pkt, lptr, sizeof(rsp->rsp_pkt));
+		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
+
+		rsp->ring_index++;
+		if (rsp->ring_index == rsp->length) {
+			rsp->ring_index = 0;
+			rsp->ring_ptr = rsp->ring;
+		} else {
+			rsp->ring_ptr++;
+		}
+
+		if (pkt->entry_status != 0 &&
+		    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+			qlafx00_error_entry(vha, rsp,
+			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,
+			    pkt->entry_type);
+			goto next_iter;
+		}
+
+		switch (pkt->entry_type) {
+		case STATUS_TYPE_FX00:
+			qlafx00_status_entry(vha, rsp, pkt);
+			break;
+
+		case STATUS_CONT_TYPE_FX00:
+			qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
+			break;
+
+		case MULTI_STATUS_TYPE_FX00:
+			qlafx00_multistatus_entry(vha, rsp, pkt);
+			break;
+
+		case ABORT_IOCB_TYPE_FX00:
+			qlafx00_abort_iocb_entry(vha, rsp->req,
+			   (struct abort_iocb_entry_fx00 *)pkt);
+			break;
+
+		case IOCTL_IOSB_TYPE_FX00:
+			qlafx00_ioctl_iosb_entry(vha, rsp->req,
+			    (struct ioctl_iocb_entry_fx00 *)pkt);
+			break;
+		default:
+			/* Type Not Supported. */
+			ql_dbg(ql_dbg_async, vha, 0x5081,
+			    "Received unknown response pkt type %x "
+			    "entry status=%x.\n",
+			    pkt->entry_type, pkt->entry_status);
+			break;
+		}
+next_iter:
+		WRT_REG_DWORD(&lptr->signature, RESPONSE_PROCESSED);
+		wmb();
+	}
+
+	/* Adjust ring index */
+	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+}
+
+/**
+ * qlafx00_async_event() - Process asynchronous events.
+ * @vha: SCSI driver HA context
+ */
+static void
+qlafx00_async_event(scsi_qla_host_t *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg;
+	int data_size = 1;
+
+	reg = &ha->iobase->ispfx00;
+	/* Dispatch on the asynchronous event mailbox value. */
+	switch (ha->aenmb[0]) {
+	case QLAFX00_MBA_SYSTEM_ERR:		/* System Error */
+		ql_log(ql_log_warn, vha, 0x5079,
+		    "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		break;
+
+	case QLAFX00_MBA_SHUTDOWN_RQSTD:	/* Shutdown requested */
+		ql_dbg(ql_dbg_async, vha, 0x5076,
+		    "Asynchronous FW shutdown requested.\n");
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+		break;
+
+	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */
+		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+		ql_dbg(ql_dbg_async, vha, 0x5077,
+		    "Asynchronous port Update received "
+		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
+		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
+		data_size = 4;
+		break;
+	default:
+		ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
+		ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
+		ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
+		ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
+		ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
+		ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
+		ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
+		ql_dbg(ql_dbg_async, vha, 0x5078,
+		    "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
+		    ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
+		    ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
+		break;
+	}
+	qlafx00_post_aenfx_work(vha, ha->aenmb[0],
+	    (uint32_t *)ha->aenmb, data_size);
+}
+
+/**
+ * qlafx00_mbx_completion() - Process mailbox command completions.
+ * @vha: SCSI driver HA context
+ * @mb0: value of mailbox register 0
+ */
+static void
+qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
+{
+	uint16_t	cnt;
+	uint16_t __iomem *wptr;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+
+	if (!ha->mcp32)
+		ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
+
+	/* Load return mailbox registers. */
+	ha->flags.mbox_int = 1;
+	ha->mailbox_out32[0] = mb0;
+	wptr = (uint16_t __iomem *)&reg->mailbox17;
+
+	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
+		ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
+		wptr++;
+	}
+}
+
+/**
+ * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by the system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qlafx00_intr_handler(int irq, void *dev_id)
+{
+	scsi_qla_host_t	*vha;
+	struct qla_hw_data *ha;
+	struct device_reg_fx00 __iomem *reg;
+	int		status;
+	unsigned long	iter;
+	uint32_t	stat;
+	uint32_t	mb[8];
+	struct rsp_que *rsp;
+	unsigned long	flags;
+	uint32_t clr_intr = 0;
+
+	rsp = (struct rsp_que *) dev_id;
+	if (!rsp) {
+		ql_log(ql_log_info, NULL, 0x507d,
+		    "%s: NULL response queue pointer.\n", __func__);
+		return IRQ_NONE;
+	}
+
+	ha = rsp->hw;
+	reg = &ha->iobase->ispfx00;
+	status = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return IRQ_HANDLED;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	vha = pci_get_drvdata(ha->pdev);
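+	/*
+	 * Service at most 50 interrupt events per invocation; clr_intr
+	 * collects the status bits to acknowledge and is cleared on each
+	 * pass through the loop.
+	 */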
+	for (iter = 50; iter--; clr_intr = 0) {
+		stat = QLAFX00_RD_INTR_REG(ha);
+		if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
+			break;
+
+		switch (stat & QLAFX00_HST_INT_STS_BITS) {
+		case QLAFX00_INTR_MB_CMPLT:
+		case QLAFX00_INTR_MB_RSP_CMPLT:
+		case QLAFX00_INTR_MB_ASYNC_CMPLT:
+		case QLAFX00_INTR_ALL_CMPLT:
+			mb[0] = RD_REG_WORD(&reg->mailbox16);
+			qlafx00_mbx_completion(vha, mb[0]);
+			status |= MBX_INTERRUPT;
+			clr_intr |= QLAFX00_INTR_MB_CMPLT;
+			break;
+		case QLAFX00_INTR_ASYNC_CMPLT:
+		case QLAFX00_INTR_RSP_ASYNC_CMPLT:
+			ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
+			qlafx00_async_event(vha);
+			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
+			break;
+		case QLAFX00_INTR_RSP_CMPLT:
+			qlafx00_process_response_queue(vha, rsp);
+			clr_intr |= QLAFX00_INTR_RSP_CMPLT;
+			break;
+		default:
+			ql_dbg(ql_dbg_async, vha, 0x507a,
+			    "Unrecognized interrupt type (%d).\n", stat);
+			break;
+		}
+		QLAFX00_CLR_INTR_REG(ha, clr_intr);
+		QLAFX00_RD_INTR_REG(ha);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+		complete(&ha->mbx_intr_comp);
+	}
+	return IRQ_HANDLED;
+}
+
+/** QLAFX00 specific IOCB implementation functions */
+
+static inline cont_a64_entry_t *
+qlafx00_prep_cont_type1_iocb(struct req_que *req,
+			     cont_a64_entry_t *lcont_pkt)
+{
+	cont_a64_entry_t *cont_pkt;
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
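+	/*
+	 * cont_pkt points at the ring slot itself; the caller fills the
+	 * local lcont_pkt copy and writes it out with memcpy_toio() once
+	 * its data segments have been loaded.
+	 */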
+	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+
+	/* Load packet defaults. */
+	*((uint32_t *)(&lcont_pkt->entry_type)) =
+	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00);
+
+	return cont_pkt;
+}
+
+static inline void
+qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
+			 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
+{
+	uint16_t	avail_dsds;
+	uint32_t	*cur_dsd;
+	scsi_qla_host_t	*vha;
+	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i, cont;
+	struct req_que *req;
+	cont_a64_entry_t lcont_pkt;
+	cont_a64_entry_t *cont_pkt;
+
+	vha = sp->fcport->vha;
+	req = vha->req;
+
+	cmd = GET_CMD_SP(sp);
+	cont = 0;
+	cont_pkt = NULL;
+
+	/* Update entry type to indicate Command Type 7 IOCB */
+	*((uint32_t *)(&lcmd_pkt->entry_type)) =
+	    __constant_cpu_to_le32(FX00_COMMAND_TYPE_7);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		return;
+	}
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		lcmd_pkt->cntrl_flags =
+		    __constant_cpu_to_le16(TMF_WRITE_DATA);
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		lcmd_pkt->cntrl_flags =
+		    __constant_cpu_to_le16(TMF_READ_DATA);
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+	}
+
+	/* One DSD is available in the Command Type 7 IOCB */
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)&lcmd_pkt->dseg_0_address;
+
+	/* Load data segments */
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
+			cont_pkt =
+			    qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
+			cur_dsd = (uint32_t *)lcont_pkt.dseg_0_address;
+			avail_dsds = 5;
+			cont = 1;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+		if (avail_dsds == 0 && cont == 1) {
+			cont = 0;
+			memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+			    REQUEST_ENTRY_SIZE);
+		}
+
+	}
+	if (avail_dsds != 0 && cont == 1) {
+		memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
+		    REQUEST_ENTRY_SIZE);
+	}
+}
+
+/**
+ * qlafx00_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qlafx00_start_scsi(srb_t *sp)
+{
+	int		ret, nseg;
+	unsigned long   flags;
+	uint32_t        index;
+	uint32_t	handle;
+	uint16_t	cnt;
+	uint16_t	req_cnt;
+	uint16_t	tot_dsds;
+	struct req_que *req = NULL;
+	struct rsp_que *rsp = NULL;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct cmd_type_7_fx00 *cmd_pkt;
+	struct cmd_type_7_fx00 lcmd_pkt;
+	struct scsi_lun llun;
+	char		tag[2];
+
+	/* Setup device pointers. */
+	ret = 0;
+
+	rsp = ha->rsp_q_map[0];
+	req = vha->req;
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Force marker_needed off for now. */
+	vha->marker_needed = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS)
+			return QLA_FUNCTION_FAILED;
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else
+		nseg = 0;
+
+	tot_dsds = nseg;
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
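+	/*
+	 * If the cached free-entry count looks too small, re-read the
+	 * firmware's request-queue-out pointer and recompute the space
+	 * left on the ring before giving up.
+	 */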
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+				(req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
+
+	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
+
+	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
+	lcmd_pkt.handle_hi = 0;
+	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
+	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
+
+	int_to_scsilun(cmd->device->lun, &llun);
+	host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
+	    sizeof(lcmd_pkt.lun));
+
+	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
+	if (scsi_populate_tag_msg(cmd, tag)) {
+		switch (tag[0]) {
+		case HEAD_OF_QUEUE_TAG:
+			lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
+			break;
+		case ORDERED_QUEUE_TAG:
+			lcmd_pkt.task = TSK_ORDERED;
+			break;
+		}
+	}
+
+	/* Load SCSI command packet. */
+	host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
+	lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+	/* Build IOCB segments */
+	qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
+
+	/* Set total data segment count. */
+	lcmd_pkt.entry_count = (uint8_t)req_cnt;
+
+	/* Specify response queue number where completion should happen */
+	lcmd_pkt.entry_status = (uint8_t) rsp->id;
+
+	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
+	    (uint8_t *)cmd->cmnd, cmd->cmd_len);
+	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
+	    (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
+
+	memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	sp->flags |= SRB_DMA_VALID;
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+	QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
+
+void
+qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
+{
+	struct srb_iocb *fxio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct req_que *req = vha->req;
+	struct tsk_mgmt_entry_fx00 tm_iocb;
+	struct scsi_lun llun;
+
+	memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
+	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
+	tm_iocb.entry_count = 1;
+	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	tm_iocb.handle_hi = 0;
+	tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
+	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
+	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
+	if (tm_iocb.control_flags == TCF_LUN_RESET) {
+		int_to_scsilun(fxio->u.tmf.lun, &llun);
+		host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
+		    sizeof(struct scsi_lun));
+	}
+
+	memcpy((void __iomem *)ptm_iocb, &tm_iocb,
+	    sizeof(struct tsk_mgmt_entry_fx00));
+	wmb();
+}
+
+void
+qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
+{
+	struct srb_iocb *fxio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct req_que *req = vha->req;
+	struct abort_iocb_entry_fx00 abt_iocb;
+
+	memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
+	abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
+	abt_iocb.entry_count = 1;
+	abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb.abort_handle =
+	    cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
+	abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
+	abt_iocb.req_que_no = cpu_to_le16(req->id);
+
+	memcpy((void __iomem *)pabt_iocb, &abt_iocb,
+	    sizeof(struct abort_iocb_entry_fx00));
+	wmb();
+}
+
+void
+qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
+{
+	struct srb_iocb *fxio = &sp->u.iocb_cmd;
+	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
+	struct fc_bsg_job *bsg_job;
+	struct fxdisc_entry_fx00 fx_iocb;
+	uint8_t entry_cnt = 1;
+
+	memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
+	fx_iocb.entry_type = FX00_IOCB_TYPE;
+	fx_iocb.handle = cpu_to_le32(sp->handle);
+	fx_iocb.entry_count = entry_cnt;
+
+	if (sp->type == SRB_FXIOCB_DCMD) {
+		fx_iocb.func_num =
+		    cpu_to_le16(sp->u.iocb_cmd.u.fxiocb.req_func_type);
+		fx_iocb.adapid = cpu_to_le32(fxio->u.fxiocb.adapter_id);
+		fx_iocb.adapid_hi = cpu_to_le32(fxio->u.fxiocb.adapter_id_hi);
+		fx_iocb.reserved_0 = cpu_to_le32(fxio->u.fxiocb.reserved_0);
+		fx_iocb.reserved_1 = cpu_to_le32(fxio->u.fxiocb.reserved_1);
+		fx_iocb.dataword_extra =
+		    cpu_to_le32(fxio->u.fxiocb.req_data_extra);
+
+		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
+			fx_iocb.req_dsdcnt = cpu_to_le16(1);
+			fx_iocb.req_xfrcnt =
+			    cpu_to_le16(fxio->u.fxiocb.req_len);
+			fx_iocb.dseg_rq_address[0] =
+			    cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
+			fx_iocb.dseg_rq_address[1] =
+			    cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
+			fx_iocb.dseg_rq_len =
+			    cpu_to_le32(fxio->u.fxiocb.req_len);
+		}
+
+		if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
+			fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
+			fx_iocb.rsp_xfrcnt =
+			    cpu_to_le16(fxio->u.fxiocb.rsp_len);
+			fx_iocb.dseg_rsp_address[0] =
+			    cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
+			fx_iocb.dseg_rsp_address[1] =
+			    cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
+			fx_iocb.dseg_rsp_len =
+			    cpu_to_le32(fxio->u.fxiocb.rsp_len);
+		}
+
+		if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
+			fx_iocb.dataword =
+			    cpu_to_le32(fxio->u.fxiocb.req_data);
+		}
+		fx_iocb.flags = fxio->u.fxiocb.flags;
+	} else {
+		struct scatterlist *sg;
+		bsg_job = sp->u.bsg_job;
+		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
+			&bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+
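+		/*
+		 * BSG pass-through request: the IOCB parameters come from
+		 * the vendor command payload (starting at vendor_cmd[1]);
+		 * the data is described by the request/reply scatter-gather
+		 * lists handled below.
+		 */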
+		fx_iocb.func_num = piocb_rqst->func_type;
+		fx_iocb.adapid = piocb_rqst->adapid;
+		fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
+		fx_iocb.reserved_0 = piocb_rqst->reserved_0;
+		fx_iocb.reserved_1 = piocb_rqst->reserved_1;
+		fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
+		fx_iocb.dataword = piocb_rqst->dataword;
+		fx_iocb.req_xfrcnt = cpu_to_le16(piocb_rqst->req_len);
+		fx_iocb.rsp_xfrcnt = cpu_to_le16(piocb_rqst->rsp_len);
+
+		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
+			int avail_dsds, tot_dsds;
+			cont_a64_entry_t lcont_pkt;
+			cont_a64_entry_t *cont_pkt = NULL;
+			uint32_t *cur_dsd;
+			int index = 0, cont = 0;
+
+			fx_iocb.req_dsdcnt =
+			    cpu_to_le16(bsg_job->request_payload.sg_cnt);
+			tot_dsds =
+			    cpu_to_le32(bsg_job->request_payload.sg_cnt);
+			cur_dsd = (uint32_t *)&fx_iocb.dseg_rq_address[0];
+			avail_dsds = 1;
+			for_each_sg(bsg_job->request_payload.sg_list, sg,
+			    tot_dsds, index) {
+				dma_addr_t sle_dma;
+
+				/* Allocate additional continuation packets? */
+				if (avail_dsds == 0) {
+					/*
+					 * Five DSDs are available in the Cont.
+					 * Type 1 IOCB.
+					 */
+					memset(&lcont_pkt, 0,
+					    REQUEST_ENTRY_SIZE);
+					cont_pkt =
+					    qlafx00_prep_cont_type1_iocb(
+						sp->fcport->vha->req,
+						&lcont_pkt);
+					cur_dsd = (uint32_t *)
+					    lcont_pkt.dseg_0_address;
+					avail_dsds = 5;
+					cont = 1;
+					entry_cnt++;
+				}
+
+				sle_dma = sg_dma_address(sg);
+				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+				avail_dsds--;
+
+				if (avail_dsds == 0 && cont == 1) {
+					cont = 0;
+					memcpy_toio(
+					    (void __iomem *)cont_pkt,
+					    &lcont_pkt, REQUEST_ENTRY_SIZE);
+					ql_dump_buffer(
+					    ql_dbg_user + ql_dbg_verbose,
+					    sp->fcport->vha, 0x3042,
+					    (uint8_t *)&lcont_pkt,
+					     REQUEST_ENTRY_SIZE);
+				}
+			}
+			if (avail_dsds != 0 && cont == 1) {
+				memcpy_toio((void __iomem *)cont_pkt,
+				    &lcont_pkt, REQUEST_ENTRY_SIZE);
+				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+				    sp->fcport->vha, 0x3043,
+				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+			}
+		}
+
+		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
+			int avail_dsds, tot_dsds;
+			cont_a64_entry_t lcont_pkt;
+			cont_a64_entry_t *cont_pkt = NULL;
+			uint32_t *cur_dsd;
+			int index = 0, cont = 0;
+
+			fx_iocb.rsp_dsdcnt =
+			   cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+			tot_dsds = cpu_to_le32(bsg_job->reply_payload.sg_cnt);
+			cur_dsd = (uint32_t *)&fx_iocb.dseg_rsp_address[0];
+			avail_dsds = 1;
+
+			for_each_sg(bsg_job->reply_payload.sg_list, sg,
+			    tot_dsds, index) {
+				dma_addr_t sle_dma;
+
+				/* Allocate additional continuation packets? */
+				if (avail_dsds == 0) {
+					/*
+					 * Five DSDs are available in the Cont.
+					 * Type 1 IOCB.
+					 */
+					memset(&lcont_pkt, 0,
+					    REQUEST_ENTRY_SIZE);
+					cont_pkt =
+					    qlafx00_prep_cont_type1_iocb(
+						sp->fcport->vha->req,
+						&lcont_pkt);
+					cur_dsd = (uint32_t *)
+					    lcont_pkt.dseg_0_address;
+					avail_dsds = 5;
+					cont = 1;
+					entry_cnt++;
+				}
+
+				sle_dma = sg_dma_address(sg);
+				*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
+				*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
+				avail_dsds--;
+
+				if (avail_dsds == 0 && cont == 1) {
+					cont = 0;
+					memcpy_toio((void __iomem *)cont_pkt,
+					    &lcont_pkt,
+					    REQUEST_ENTRY_SIZE);
+					ql_dump_buffer(
+					    ql_dbg_user + ql_dbg_verbose,
+					    sp->fcport->vha, 0x3045,
+					    (uint8_t *)&lcont_pkt,
+					    REQUEST_ENTRY_SIZE);
+				}
+			}
+			if (avail_dsds != 0 && cont == 1) {
+				memcpy_toio((void __iomem *)cont_pkt,
+				    &lcont_pkt, REQUEST_ENTRY_SIZE);
+				ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+				    sp->fcport->vha, 0x3046,
+				    (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
+			}
+		}
+
+		if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
+			fx_iocb.dataword = cpu_to_le32(piocb_rqst->dataword);
+		fx_iocb.flags = piocb_rqst->flags;
+		fx_iocb.entry_count = entry_cnt;
+	}
+
+	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
+	    sp->fcport->vha, 0x3047,
+	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
+
+	memcpy((void __iomem *)pfxiocb, &fx_iocb,
+	    sizeof(struct fxdisc_entry_fx00));
+	wmb();
+}
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
new file mode 100644
index 0000000..cc327dc
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -0,0 +1,510 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_MR_H
+#define __QLA_MR_H
+
+/*
+ * The PCI VendorID and DeviceID for our board.
+ */
+#define PCI_DEVICE_ID_QLOGIC_ISPF001		0xF001
+
+/* FX00 specific definitions */
+
+#define FX00_COMMAND_TYPE_7	0x07	/* Command Type 7 entry for 7XXX */
+struct cmd_type_7_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t handle_hi;
+
+	uint16_t tgt_idx;		/* Target Idx. */
+	uint16_t timeout;		/* Command timeout. */
+
+	uint16_t dseg_count;		/* Data segment count. */
+	uint16_t scsi_rsp_dsd_len;
+
+	struct scsi_lun lun;		/* LUN (LE). */
+
+	uint8_t cntrl_flags;
+
+	uint8_t task_mgmt_flags;	/* Task management flags. */
+
+	uint8_t task;
+
+	uint8_t crn;
+
+	uint8_t fcp_cdb[MAX_CMDSZ];	/* SCSI command words. */
+	uint32_t byte_count;		/* Total byte count. */
+
+	uint32_t dseg_0_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_0_len;		/* Data segment 0 length. */
+};
+
+/*
+ * ISP queue - marker entry structure definition.
+ */
+struct mrk_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t handle_count;		/* Handle count. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t handle_hi;		/* System handle. */
+
+	uint16_t tgt_id;		/* Target ID. */
+
+	uint8_t modifier;		/* Modifier (7-0). */
+	uint8_t reserved_1;
+
+	uint8_t reserved_2[5];
+
+	uint8_t lun[8];			/* FCP LUN (BE). */
+	uint8_t reserved_3[36];
+};
+
+
+#define	STATUS_TYPE_FX00	0x01		/* Status entry. */
+struct sts_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t handle_hi;		/* System handle. */
+
+	uint16_t comp_status;		/* Completion status. */
+	uint16_t reserved_0;		/* OX_ID used by the firmware. */
+
+	uint32_t residual_len;		/* FW calc residual transfer length. */
+
+	uint16_t reserved_1;
+	uint16_t state_flags;		/* State flags. */
+
+	uint16_t reserved_2;
+	uint16_t scsi_status;		/* SCSI status. */
+
+	uint32_t sense_len;		/* FCP SENSE length. */
+	uint8_t data[32];		/* FCP response/sense information. */
+};
+
+
+#define MAX_HANDLE_COUNT	15
+#define MULTI_STATUS_TYPE_FX00	0x0D
+
+struct multi_sts_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t handle_count;
+	uint8_t entry_status;
+
+	uint32_t handles[MAX_HANDLE_COUNT];
+};
+
+#define TSK_MGMT_IOCB_TYPE_FX00		0x05
+struct tsk_mgmt_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+
+	uint32_t handle_hi;		/* System handle. */
+
+	uint16_t tgt_id;		/* Target Idx. */
+
+	uint16_t reserved_1;
+
+	uint16_t delay;			/* Activity delay in seconds. */
+
+	uint16_t timeout;		/* Command timeout. */
+
+	struct scsi_lun lun;		/* LUN (LE). */
+
+	uint32_t control_flags;		/* Control Flags. */
+
+	uint8_t reserved_2[32];
+};
+
+
+#define	ABORT_IOCB_TYPE_FX00	0x08		/* Abort IOCB status. */
+struct abort_iocb_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t handle_hi;		/* System handle. */
+
+	uint16_t tgt_id_sts;		/* Completion status. */
+	uint16_t options;
+
+	uint32_t abort_handle;		/* System handle. */
+	uint32_t abort_handle_hi;	/* System handle. */
+
+	uint16_t req_que_no;
+	uint8_t reserved_1[38];
+};
+
+#define IOCTL_IOSB_TYPE_FX00	0x0C
+struct ioctl_iocb_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t reserved_0;		/* System handle. */
+
+	uint16_t comp_func_num;
+	uint16_t fw_iotcl_flags;
+
+	uint32_t dataword_r;		/* Data word returned */
+	uint32_t adapid;		/* Adapter ID */
+	uint32_t adapid_hi;		/* Adapter ID high */
+	uint32_t reserved_1;
+
+	uint32_t seq_no;
+	uint8_t reserved_2[20];
+	uint32_t residuallen;
+	uint32_t status;
+};
+
+#define STATUS_CONT_TYPE_FX00 0x04
+
+#define FX00_IOCB_TYPE		0x0B
+struct fxdisc_entry_fx00 {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* System handle. */
+	uint32_t reserved_0;		/* System handle. */
+
+	uint16_t func_num;
+	uint16_t req_xfrcnt;
+	uint16_t req_dsdcnt;
+	uint16_t rsp_xfrcnt;
+	uint16_t rsp_dsdcnt;
+	uint8_t flags;
+	uint8_t reserved_1;
+
+	uint32_t dseg_rq_address[2];	/* Data segment 0 address. */
+	uint32_t dseg_rq_len;		/* Data segment 0 length. */
+	uint32_t dseg_rsp_address[2];	/* Data segment 1 address. */
+	uint32_t dseg_rsp_len;		/* Data segment 1 length. */
+
+	uint32_t dataword;
+	uint32_t adapid;
+	uint32_t adapid_hi;
+	uint32_t dataword_extra;
+};
+
+struct qlafx00_tgt_node_info {
+	uint8_t tgt_node_wwpn[WWN_SIZE];
+	uint8_t tgt_node_wwnn[WWN_SIZE];
+	uint32_t tgt_node_state;
+	uint8_t reserved[128];
+	uint32_t reserved_1[8];
+	uint64_t reserved_2[4];
+} __packed;
+
+#define QLAFX00_TGT_NODE_INFO sizeof(struct qlafx00_tgt_node_info)
+
+#define QLAFX00_LINK_STATUS_DOWN	0x10
+#define QLAFX00_LINK_STATUS_UP		0x11
+
+#define QLAFX00_PORT_SPEED_2G	0x2
+#define QLAFX00_PORT_SPEED_4G	0x4
+#define QLAFX00_PORT_SPEED_8G	0x8
+#define QLAFX00_PORT_SPEED_10G	0xa
+struct port_info_data {
+	uint8_t         port_state;
+	uint8_t         port_type;
+	uint16_t        port_identifier;
+	uint32_t        up_port_state;
+	uint8_t         fw_ver_num[32];
+	uint8_t         portal_attrib;
+	uint16_t        host_option;
+	uint8_t         reset_delay;
+	uint8_t         pdwn_retry_cnt;
+	uint16_t        max_luns2tgt;
+	uint8_t         risc_ver;
+	uint8_t         pconn_option;
+	uint16_t        risc_option;
+	uint16_t        max_frame_len;
+	uint16_t        max_iocb_alloc;
+	uint16_t        exec_throttle;
+	uint8_t         retry_cnt;
+	uint8_t         retry_delay;
+	uint8_t         port_name[8];
+	uint8_t         port_id[3];
+	uint8_t         link_status;
+	uint8_t         plink_rate;
+	uint32_t        link_config;
+	uint16_t        adap_haddr;
+	uint8_t         tgt_disc;
+	uint8_t         log_tout;
+	uint8_t         node_name[8];
+	uint16_t        erisc_opt1;
+	uint8_t         resp_acc_tmr;
+	uint8_t         intr_del_tmr;
+	uint8_t         erisc_opt2;
+	uint8_t         alt_port_name[8];
+	uint8_t         alt_node_name[8];
+	uint8_t         link_down_tout;
+	uint8_t         conn_type;
+	uint8_t         fc_fw_mode;
+	uint32_t        uiReserved[48];
+} __packed;
+
+/* OS Type Designations */
+#define OS_TYPE_UNKNOWN             0
+#define OS_TYPE_LINUX               2
+
+/* Linux Info */
+#define SYSNAME_LENGTH              128
+#define NODENAME_LENGTH             64
+#define RELEASE_LENGTH              64
+#define VERSION_LENGTH              64
+#define MACHINE_LENGTH              64
+#define DOMNAME_LENGTH              64
+
+struct host_system_info {
+	uint32_t os_type;
+	char    sysname[SYSNAME_LENGTH];
+	char    nodename[NODENAME_LENGTH];
+	char    release[RELEASE_LENGTH];
+	char    version[VERSION_LENGTH];
+	char    machine[MACHINE_LENGTH];
+	char    domainname[DOMNAME_LENGTH];
+	char    hostdriver[VERSION_LENGTH];
+	uint32_t reserved[64];
+} __packed;
+
+struct register_host_info {
+	struct host_system_info     hsi;	/* host system info */
+	uint64_t        utc;			/* UTC (system time) */
+	uint32_t        reserved[64];		/* future additions */
+} __packed;
+
+
+#define QLAFX00_PORT_DATA_INFO (sizeof(struct port_info_data))
+#define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)
+
+struct config_info_data {
+	uint8_t		product_name[256];
+	uint8_t		symbolic_name[64];
+	uint8_t		serial_num[32];
+	uint8_t		hw_version[16];
+	uint8_t		fw_version[16];
+	uint8_t		uboot_version[16];
+	uint8_t		fru_serial_num[32];
+
+	uint8_t		fc_port_count;
+	uint8_t		iscsi_port_count;
+	uint8_t		reserved1[2];
+
+	uint8_t		mode;
+	uint8_t		log_level;
+	uint8_t		reserved2[2];
+
+	uint32_t	log_size;
+
+	uint8_t		tgt_pres_mode;
+	uint8_t		iqn_flags;
+	uint8_t		lun_mapping;
+
+	uint64_t	adapter_id;
+
+	uint32_t	cluster_key_len;
+	uint8_t		cluster_key[10];
+
+	uint64_t	cluster_master_id;
+	uint64_t	cluster_slave_id;
+	uint8_t		cluster_flags;
+} __packed;
+
+#define FXDISC_GET_CONFIG_INFO		0x01
+#define FXDISC_GET_PORT_INFO		0x02
+#define FXDISC_GET_TGT_NODE_INFO	0x80
+#define FXDISC_GET_TGT_NODE_LIST	0x81
+#define FXDISC_REG_HOST_INFO		0x99
+
+#define QLAFX00_HBA_ICNTRL_REG		0x21B08
+#define QLAFX00_ICR_ENB_MASK            0x80000000
+#define QLAFX00_ICR_DIS_MASK            0x7fffffff
+#define QLAFX00_HST_RST_REG		0x18264
+#define QLAFX00_HST_TO_HBA_REG		0x20A04
+#define QLAFX00_HBA_TO_HOST_REG		0x21B70
+#define QLAFX00_HST_INT_STS_BITS	0x7
+#define QLAFX00_BAR1_BASE_ADDR_REG	0x40018
+#define QLAFX00_PEX0_WIN0_BASE_ADDR_REG	0x41824
+
+#define QLAFX00_INTR_MB_CMPLT		0x1
+#define QLAFX00_INTR_RSP_CMPLT		0x2
+#define QLAFX00_INTR_MB_RSP_CMPLT	0x3
+#define QLAFX00_INTR_ASYNC_CMPLT	0x4
+#define QLAFX00_INTR_MB_ASYNC_CMPLT	0x5
+#define QLAFX00_INTR_RSP_ASYNC_CMPLT	0x6
+#define QLAFX00_INTR_ALL_CMPLT		0x7
+
+#define QLAFX00_MBA_SYSTEM_ERR		0x8002
+#define QLAFX00_MBA_LINK_UP		0x8011
+#define QLAFX00_MBA_LINK_DOWN		0x8012
+#define QLAFX00_MBA_PORT_UPDATE		0x8014
+#define QLAFX00_MBA_SHUTDOWN_RQSTD	0x8062
+
+#define SOC_SW_RST_CONTROL_REG_CORE0     0x0020800
+#define SOC_FABRIC_RST_CONTROL_REG       0x0020840
+#define SOC_FABRIC_CONTROL_REG           0x0020200
+#define SOC_FABRIC_CONFIG_REG            0x0020204
+
+#define SOC_INTERRUPT_SOURCE_I_CONTROL_REG     0x0020B00
+#define SOC_CORE_TIMER_REG                     0x0021850
+#define SOC_IRQ_ACK_REG                        0x00218b4
+
+#define CONTINUE_A64_TYPE_FX00	0x03	/* Continuation entry. */
+
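+/*
+ * These helpers access the QLAFX00 host-interface and SoC registers through
+ * the memory-mapped cregbase window.
+ */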
+#define QLAFX00_SET_HST_INTR(ha, value) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \
+	value)
+
+#define QLAFX00_CLR_HST_INTR(ha, value) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+	~value)
+
+#define QLAFX00_RD_INTR_REG(ha) \
+	RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG)
+
+#define QLAFX00_CLR_INTR_REG(ha, value) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \
+	~value)
+
+#define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\
+	WRT_REG_DWORD((ha)->cregbase + off, val)
+
+#define QLAFX00_GET_HBA_SOC_REG(ha, off)\
+	RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_HBA_RST_REG(ha, val)\
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val)
+
+#define QLAFX00_RD_ICNTRL_REG(ha) \
+	RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG)
+
+#define QLAFX00_ENABLE_ICNTRL_REG(ha) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+	(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \
+	 QLAFX00_ICR_ENB_MASK))
+
+#define QLAFX00_DISABLE_ICNTRL_REG(ha) \
+	WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \
+	(QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \
+	 QLAFX00_ICR_DIS_MASK))
+
+#define QLAFX00_RD_REG(ha, off) \
+	RD_REG_DWORD((ha)->cregbase + off)
+
+#define QLAFX00_WR_REG(ha, off, val) \
+	WRT_REG_DWORD((ha)->cregbase + off, val)
+
+struct qla_mt_iocb_rqst_fx00 {
+	uint32_t reserved_0;
+
+	uint16_t func_type;
+	uint8_t flags;
+	uint8_t reserved_1;
+
+	uint32_t dataword;
+
+	uint32_t adapid;
+	uint32_t adapid_hi;
+
+	uint32_t dataword_extra;
+
+	uint32_t req_len;
+
+	uint32_t rsp_len;
+};
+
+struct qla_mt_iocb_rsp_fx00 {
+	uint32_t reserved_1;
+
+	uint16_t func_type;
+	uint16_t ioctl_flags;
+
+	uint32_t ioctl_data;
+
+	uint32_t adapid;
+	uint32_t adapid_hi;
+
+	uint32_t reserved_2;
+	uint32_t seq_number;
+
+	uint8_t reserved_3[20];
+
+	int32_t res_count;
+
+	uint32_t status;
+};
+
+
+#define MAILBOX_REGISTER_COUNT_FX00	16
+#define AEN_MAILBOX_REGISTER_COUNT_FX00	8
+#define MAX_FIBRE_DEVICES_FX00	512
+#define MAX_LUNS_FX00		0x1024
+#define MAX_TARGETS_FX00	MAX_ISA_DEVICES
+#define REQUEST_ENTRY_CNT_FX00		512	/* Number of request entries. */
+#define RESPONSE_ENTRY_CNT_FX00		256	/* Number of response entries.*/
+
+/*
+ * Firmware state codes for QLAFX00 adapters
+ */
+#define FSTATE_FX00_CONFIG_WAIT     0x0000	/* Waiting for driver to issue
+						 * Initialize FW Mbox cmd
+						 */
+#define FSTATE_FX00_INITIALIZED     0x1000	/* FW has been initialized by
+						 * the driver
+						 */
+
+#define FX00_DEF_RATOV	10
+
+struct mr_data_fx00 {
+	uint8_t	product_name[256];
+	uint8_t	symbolic_name[64];
+	uint8_t	serial_num[32];
+	uint8_t	hw_version[16];
+	uint8_t	fw_version[16];
+	uint8_t	uboot_version[16];
+	uint8_t	fru_serial_num[32];
+	fc_port_t       fcport;		/* fcport used for requests
+					 * that are not linked
+					 * to a particular target
+					 */
+	uint8_t fw_hbt_en;
+	uint8_t fw_hbt_cnt;
+	uint8_t fw_hbt_miss_cnt;
+	uint32_t old_fw_hbt_cnt;
+	uint16_t fw_reset_timer_tick;
+	uint8_t fw_reset_timer_exp;
+	uint32_t old_aenmbx0_state;
+};
+
+#define QLAFX00_LOOP_DOWN_TIME		615     /* 600 */
+#define QLAFX00_HEARTBEAT_INTERVAL	6	/* number of seconds */
+#define QLAFX00_HEARTBEAT_MISS_CNT	3	/* number of miss */
+#define QLAFX00_RESET_INTERVAL		120	/* number of seconds */
+#define QLAFX00_MAX_RESET_INTERVAL	600	/* number of seconds */
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2c6dd3d..a083715 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -47,6 +47,7 @@
 		"Specify if Class 2 operations are supported from the very "
 		"beginning. Default is 0 - class 2 not supported.");
 
+
 int ql2xlogintimeout = 20;
 module_param(ql2xlogintimeout, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xlogintimeout,
@@ -354,7 +355,12 @@
 
 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 {
-	if (req && req->ring)
+	if (IS_QLAFX00(ha)) {
+		if (req && req->ring_fx00)
+			dma_free_coherent(&ha->pdev->dev,
+			    (req->length_fx00 + 1) * sizeof(request_t),
+			    req->ring_fx00, req->dma_fx00);
+	} else if (req && req->ring)
 		dma_free_coherent(&ha->pdev->dev,
 		(req->length + 1) * sizeof(request_t),
 		req->ring, req->dma);
@@ -368,11 +374,16 @@
 
 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
-	if (rsp && rsp->ring)
+	if (IS_QLAFX00(ha)) {
+		if (rsp && rsp->ring_fx00)
+			dma_free_coherent(&ha->pdev->dev,
+			    (rsp->length_fx00 + 1) * sizeof(request_t),
+			    rsp->ring_fx00, rsp->dma_fx00);
+	} else if (rsp && rsp->ring) {
 		dma_free_coherent(&ha->pdev->dev,
 		(rsp->length + 1) * sizeof(response_t),
 		rsp->ring, rsp->dma);
-
+	}
 	kfree(rsp);
 	rsp = NULL;
 }
@@ -633,7 +644,7 @@
 	qla2x00_rel_sp(sp->fcport->vha, sp);
 }
 
-static void
+void
 qla2x00_sp_compl(void *data, void *ptr, int res)
 {
 	struct qla_hw_data *ha = (struct qla_hw_data *)data;
@@ -657,6 +668,9 @@
 	cmd->scsi_done(cmd);
 }
 
+/* If we are SP1 here, we still need to take and release the host_lock as SP1
+ * does not have the changes necessary to avoid taking host->host_lock.
+ */
 static int
 qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 {
@@ -1304,6 +1318,9 @@
 		}
 	}
 
+	if (IS_QLAFX00(ha))
+		return QLA_SUCCESS;
+
 	if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
 		atomic_set(&vha->loop_state, LOOP_DOWN);
 		atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -1858,6 +1875,7 @@
 	.start_scsi		= qla2x00_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
 	.iospace_config     	= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla2300_isp_ops = {
@@ -1895,6 +1913,7 @@
 	.start_scsi		= qla2x00_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
 	.iospace_config     	= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla24xx_isp_ops = {
@@ -1932,6 +1951,7 @@
 	.start_scsi		= qla24xx_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
 	.iospace_config     	= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla25xx_isp_ops = {
@@ -1969,6 +1989,7 @@
 	.start_scsi		= qla24xx_dif_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
 	.iospace_config     	= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla81xx_isp_ops = {
@@ -2006,6 +2027,7 @@
 	.start_scsi		= qla24xx_dif_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
 	.iospace_config     	= qla2x00_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla82xx_isp_ops = {
@@ -2043,6 +2065,7 @@
 	.start_scsi             = qla82xx_start_scsi,
 	.abort_isp		= qla82xx_abort_isp,
 	.iospace_config     	= qla82xx_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
 };
 
 static struct isp_operations qla83xx_isp_ops = {
@@ -2080,6 +2103,45 @@
 	.start_scsi		= qla24xx_dif_start_scsi,
 	.abort_isp		= qla2x00_abort_isp,
 	.iospace_config		= qla83xx_iospace_config,
+	.initialize_adapter	= qla2x00_initialize_adapter,
+};
+
+static struct isp_operations qlafx00_isp_ops = {
+	.pci_config		= qlafx00_pci_config,
+	.reset_chip		= qlafx00_soft_reset,
+	.chip_diag		= qlafx00_chip_diag,
+	.config_rings		= qlafx00_config_rings,
+	.reset_adapter		= qlafx00_soft_reset,
+	.nvram_config		= NULL,
+	.update_fw_options	= NULL,
+	.load_risc		= NULL,
+	.pci_info_str		= qlafx00_pci_info_str,
+	.fw_version_str		= qlafx00_fw_version_str,
+	.intr_handler		= qlafx00_intr_handler,
+	.enable_intrs		= qlafx00_enable_intrs,
+	.disable_intrs		= qlafx00_disable_intrs,
+	.abort_command		= qlafx00_abort_command,
+	.target_reset		= qlafx00_abort_target,
+	.lun_reset		= qlafx00_lun_reset,
+	.fabric_login		= NULL,
+	.fabric_logout		= NULL,
+	.calc_req_entries	= NULL,
+	.build_iocbs		= NULL,
+	.prep_ms_iocb		= qla24xx_prep_ms_iocb,
+	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,
+	.read_nvram		= qla24xx_read_nvram_data,
+	.write_nvram		= qla24xx_write_nvram_data,
+	.fw_dump		= NULL,
+	.beacon_on		= qla24xx_beacon_on,
+	.beacon_off		= qla24xx_beacon_off,
+	.beacon_blink		= NULL,
+	.read_optrom		= qla24xx_read_optrom_data,
+	.write_optrom		= qla24xx_write_optrom_data,
+	.get_flash_version	= qla24xx_get_flash_version,
+	.start_scsi		= qlafx00_start_scsi,
+	.abort_isp		= qlafx00_abort_isp,
+	.iospace_config		= qlafx00_iospace_config,
+	.initialize_adapter	= qlafx00_initialize_adapter,
 };
 
 static inline void
@@ -2192,6 +2254,9 @@
 		ha->device_type |= DT_T10_PI;
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISPF001:
+		ha->device_type |= DT_ISPFX00;
+		break;
 	}
 
 	if (IS_QLA82XX(ha))
@@ -2265,7 +2330,8 @@
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
-	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) {
 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 		mem_only = 1;
 		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2436,6 +2502,18 @@
 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
 		ha->nvram_conf_off = ~0;
 		ha->nvram_data_off = ~0;
+	}  else if (IS_QLAFX00(ha)) {
+		ha->max_fibre_devices = MAX_FIBRE_DEVICES_FX00;
+		ha->mbx_count = MAILBOX_REGISTER_COUNT_FX00;
+		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;
+		req_length = REQUEST_ENTRY_CNT_FX00;
+		rsp_length = RESPONSE_ENTRY_CNT_FX00;
+		ha->init_cb_size = sizeof(struct init_cb_fx);
+		ha->isp_ops = &qlafx00_isp_ops;
+		ha->port_down_retry_count = 30; /* default value */
+		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
+		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		ha->mr.fw_hbt_en = 1;
 	}
 
 	ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
@@ -2500,13 +2578,24 @@
 
 	host = base_vha->host;
 	base_vha->req = req;
-	host->can_queue = req->length + 128;
+	if (IS_QLAFX00(ha))
+		host->can_queue = 1024;
+	else
+		host->can_queue = req->length + 128;
 	if (IS_QLA2XXX_MIDTYPE(ha))
 		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
 	else
 		base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER +
 						base_vha->vp_idx;
 
+	/* Setup fcport template structure. */
+	ha->mr.fcport.vha = base_vha;
+	ha->mr.fcport.port_type = FCT_UNKNOWN;
+	ha->mr.fcport.loop_id = FC_NO_LOOP_ID;
+	qla2x00_set_fcport_state(&ha->mr.fcport, FCS_UNCONFIGURED);
+	ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
+	ha->mr.fcport.scan_state = 1;
+
 	/* Set the SG table size based on ISP type */
 	if (!IS_FWI2_CAPABLE(ha)) {
 		if (IS_QLA2100(ha))
@@ -2562,6 +2651,13 @@
 	rsp->req = req;
 	req->rsp = rsp;
 
+	if (IS_QLAFX00(ha)) {
+		ha->rsp_q_map[0] = rsp;
+		ha->req_q_map[0] = req;
+		set_bit(0, ha->req_qid_map);
+		set_bit(0, ha->rsp_qid_map);
+	}
+
 	/* FWI2-capable only. */
 	req->req_q_in = &ha->iobase->isp24.req_q_in;
 	req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2574,6 +2670,13 @@
 		rsp->rsp_q_out =  &ha->mqiobase->isp25mq.rsp_q_out;
 	}
 
+	if (IS_QLAFX00(ha)) {
+		req->req_q_in = &ha->iobase->ispfx00.req_q_in;
+		req->req_q_out = &ha->iobase->ispfx00.req_q_out;
+		rsp->rsp_q_in = &ha->iobase->ispfx00.rsp_q_in;
+		rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
+	}
+
 	if (IS_QLA82XX(ha)) {
 		req->req_q_out = &ha->iobase->isp82.req_q_out[0];
 		rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
@@ -2595,7 +2698,7 @@
 	    "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
 	    req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
-	if (qla2x00_initialize_adapter(base_vha)) {
+	if (ha->isp_ops->initialize_adapter(base_vha)) {
 		ql_log(ql_log_fatal, base_vha, 0x00d6,
 		    "Failed to initialize adapter - Adapter flags %x.\n",
 		    base_vha->device_flags);
@@ -2720,6 +2823,18 @@
 
 	qla2x00_alloc_sysfs_attr(base_vha);
 
+	if (IS_QLAFX00(ha)) {
+		ret = qlafx00_fx_disc(base_vha,
+			&base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
+
+		ret = qlafx00_fx_disc(base_vha,
+			&base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
+
+		/* Register system information */
+		ret =  qlafx00_fx_disc(base_vha,
+			&base_vha->hw->mr.fcport, FXDISC_REG_HOST_INFO);
+	}
+
 	qla2x00_init_host_attr(base_vha);
 
 	qla2x00_dfs_setup(base_vha);
@@ -2777,6 +2892,8 @@
 	} else {
 		if (ha->iobase)
 			iounmap(ha->iobase);
+		if (ha->cregbase)
+			iounmap(ha->cregbase);
 	}
 	pci_release_selected_regions(ha->pdev, ha->bars);
 	kfree(ha);
@@ -2960,6 +3077,9 @@
 		if (ha->iobase)
 			iounmap(ha->iobase);
 
+		if (ha->cregbase)
+			iounmap(ha->cregbase);
+
 		if (ha->mqiobase)
 			iounmap(ha->mqiobase);
 
@@ -3068,6 +3188,12 @@
 void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
     int do_login, int defer)
 {
+	if (IS_QLAFX00(vha->hw)) {
+		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+		qla2x00_schedule_rport_del(vha, fcport, defer);
+		return;
+	}
+
 	if (atomic_read(&fcport->state) == FCS_ONLINE &&
 	    vha->vp_idx == fcport->vha->vp_idx) {
 		qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
@@ -3710,6 +3836,22 @@
 	kobject_uevent_env(&vha->hw->pdev->dev.kobj, KOBJ_CHANGE, envp);
 }
 
+int
+qlafx00_post_aenfx_work(struct scsi_qla_host *vha,  uint32_t evtcode,
+			uint32_t *data, int cnt)
+{
+	struct qla_work_evt *e;
+
+	e = qla2x00_alloc_work(vha, QLA_EVT_AENFX);
+	if (!e)
+		return QLA_FUNCTION_FAILED;
+
+	e->u.aenfx.evtcode = evtcode;
+	e->u.aenfx.count = cnt;
+	memcpy(e->u.aenfx.mbx, data, sizeof(*data) * cnt);
+	return qla2x00_post_work(vha, e);
+}
+
 void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
@@ -3758,6 +3900,9 @@
 		case QLA_EVT_UEVENT:
 			qla2x00_uevent_emit(vha, e->u.uevent.code);
 			break;
+		case QLA_EVT_AENFX:
+			qlafx00_process_aen(vha, e);
+			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
@@ -4592,6 +4737,38 @@
 				ql_dbg(ql_dbg_dpc, base_vha, 0x4006,
 				    "FCoE context reset end.\n");
 			}
+		} else if (IS_QLAFX00(ha)) {
+			if (test_and_clear_bit(ISP_UNRECOVERABLE,
+				&base_vha->dpc_flags)) {
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4020,
+				    "Firmware Reset Recovery\n");
+				if (qlafx00_reset_initialize(base_vha)) {
+					/* Failed. Abort isp later. */
+					if (!test_bit(UNLOADING,
+					    &base_vha->dpc_flags))
+						set_bit(ISP_UNRECOVERABLE,
+						    &base_vha->dpc_flags);
+					ql_dbg(ql_dbg_dpc, base_vha, 0x4021,
+					    "Reset Recovery Failed\n");
+				}
+			}
+
+			if (test_and_clear_bit(FX00_TARGET_SCAN,
+				&base_vha->dpc_flags)) {
+				ql_dbg(ql_dbg_dpc, base_vha, 0x4022,
+				    "ISPFx00 Target Scan scheduled\n");
+				if (qlafx00_rescan_isp(base_vha)) {
+					if (!test_bit(UNLOADING,
+					    &base_vha->dpc_flags))
+						set_bit(ISP_UNRECOVERABLE,
+						    &base_vha->dpc_flags);
+					ql_dbg(ql_dbg_dpc, base_vha, 0x401e,
+					    "ISPFx00 Target Scan Failed\n");
+				}
+				ql_dbg(ql_dbg_dpc, base_vha, 0x401f,
+				    "ISPFx00 Target Scan End\n");
+			}
 		}
 
 		if (test_and_clear_bit(ISP_ABORT_NEEDED,
@@ -4630,6 +4807,9 @@
 			clear_bit(SCR_PENDING, &base_vha->dpc_flags);
 		}
 
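+		/*
+		 * ISPFx00 skips the quiescence, reset-marker and relogin
+		 * handling below and goes straight to loop resync.
+		 */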
+		if (IS_QLAFX00(ha))
+			goto loop_resync_check;
+
 		if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
 			    "Quiescence mode scheduled.\n");
@@ -4654,7 +4834,7 @@
 		}
 
 		if (test_and_clear_bit(RESET_MARKER_NEEDED,
-							&base_vha->dpc_flags) &&
+				&base_vha->dpc_flags) &&
 		    (!(test_and_set_bit(RESET_ACTIVE, &base_vha->dpc_flags)))) {
 
 			ql_dbg(ql_dbg_dpc, base_vha, 0x400b,
@@ -4677,9 +4857,9 @@
 			ql_dbg(ql_dbg_dpc, base_vha, 0x400e,
 			    "Relogin end.\n");
 		}
-
+loop_resync_check:
 		if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
-							&base_vha->dpc_flags)) {
+		    &base_vha->dpc_flags)) {
 
 			ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
 			    "Loop resync scheduled.\n");
@@ -4697,6 +4877,9 @@
 			    "Loop resync end.\n");
 		}
 
+		if (IS_QLAFX00(ha))
+			goto intr_on_check;
+
 		if (test_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags) &&
 		    atomic_read(&base_vha->loop_state) == LOOP_READY) {
 			clear_bit(NPIV_CONFIG_NEEDED, &base_vha->dpc_flags);
@@ -4714,7 +4897,7 @@
 		if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
 		    &base_vha->dpc_flags))
 			qla2x00_host_ramp_up_queuedepth(base_vha);
-
+intr_on_check:
 		if (!ha->interrupts_on)
 			ha->isp_ops->enable_intrs(ha);
 
@@ -4722,7 +4905,8 @@
 					&base_vha->dpc_flags))
 			ha->isp_ops->beacon_blink(base_vha);
 
-		qla2x00_do_dpc_all_vps(base_vha);
+		if (!IS_QLAFX00(ha))
+			qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
 end_loop:
@@ -4818,6 +5002,9 @@
 		qla82xx_watchdog(vha);
 	}
 
+	if (!vha->vp_idx && IS_QLAFX00(ha))
+		qlafx00_timer_routine(vha);
+
 	/* Loop down handler. */
 	if (atomic_read(&vha->loop_down_timer) > 0 &&
 	    !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
@@ -5335,6 +5522,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 61b5d8c..fcdc223 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2585,25 +2585,6 @@
 	ha->tgt.tgt_ops->free_cmd(cmd);
 }
 
-/* ha->hardware_lock supposed to be held on entry */
-/* called via callback from qla2xxx */
-void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
-{
-	struct qla_hw_data *ha = vha->hw;
-	struct qla_tgt *tgt = ha->tgt.qla_tgt;
-
-	if (likely(tgt == NULL)) {
-		ql_dbg(ql_dbg_tgt, vha, 0xe021,
-		    "CTIO, but target mode not enabled"
-		    " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
-		return;
-	}
-
-	tgt->irq_cmd_count++;
-	qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
-	tgt->irq_cmd_count--;
-}
-
 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
 	uint8_t task_codes)
 {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index ff9ccb9..b33e411 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -980,7 +980,6 @@
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
-extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
 extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
 extern void qlt_enable_vha(struct scsi_qla_host *);
 extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ec54036..6c66d22 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.04.00.13-k"
+#define QLA2XXX_VERSION      "8.05.00.03-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
-#define QLA_DRIVER_MINOR_VER	4
+#define QLA_DRIVER_MINOR_VER	5
 #define QLA_DRIVER_PATCH_VER	0
 #define QLA_DRIVER_BETA_VER	0
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 5d8fe4f..d607eb8 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1629,9 +1629,37 @@
 	ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
 }
 
+/**
+ * qla4_83xx_eport_init - Initialize EPort.
+ * @ha: Pointer to host adapter structure.
+ *
+ * If the EPort hardware is in a reset state before pause frames are disabled,
+ * serious hardware wedging issues can occur. To prevent this, perform EPort
+ * init every time before disabling pause frames.
+ **/
+static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
+{
+	/* Clear the 8 registers */
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
+
+	/* Write any value to Reset Control register */
+	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
+
+	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
+}
+
 void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
 {
 	ha->isp_ops->idc_lock(ha);
+	/* Before disabling pause frames, ensure that EPort is not in reset */
+	qla4_83xx_eport_init(ha);
 	qla4_83xx_dump_pause_control_regs(ha);
 	__qla4_83xx_disable_pause(ha);
 	ha->isp_ops->idc_unlock(ha);
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index 6a00f90..fab237f 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -55,6 +55,16 @@
 #define QLA83XX_SET_PAUSE_VAL		0x0
 #define QLA83XX_SET_TC_MAX_CELL_VAL	0x03FF03FF
 
+#define QLA83XX_RESET_CONTROL		0x28084E50
+#define QLA83XX_RESET_REG		0x28084E60
+#define QLA83XX_RESET_PORT0		0x28084E70
+#define QLA83XX_RESET_PORT1		0x28084E80
+#define QLA83XX_RESET_PORT2		0x28084E90
+#define QLA83XX_RESET_PORT3		0x28084EA0
+#define QLA83XX_RESET_SRE_SHIM		0x28084EB0
+#define QLA83XX_RESET_EPG_SHIM		0x28084EC0
+#define QLA83XX_RESET_ETHER_PCS		0x28084ED0
+
 /* qla_83xx_reg_tbl registers */
 #define QLA83XX_PEG_HALT_STATUS1	0x34A8
 #define QLA83XX_PEG_HALT_STATUS2	0x34AC
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.h b/drivers/scsi/qla4xxx/ql4_dbg.h
index 5b0afc1..51c365b 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.h
+++ b/drivers/scsi/qla4xxx/ql4_dbg.h
@@ -12,6 +12,7 @@
 /* #define QL_DEBUG_LEVEL_3  */		/* Output function tracing */
 /* #define QL_DEBUG_LEVEL_4  */
 /* #define QL_DEBUG_LEVEL_5  */
+/* #define QL_DEBUG_LEVEL_7  */
 /* #define QL_DEBUG_LEVEL_9  */
 
 #define QL_DEBUG_LEVEL_2	/* Always enable error messages */
@@ -48,6 +49,12 @@
 #define DEBUG5(x)	do {} while (0);
 #endif				/*  */
 
+#if defined(QL_DEBUG_LEVEL_7)
+#define DEBUG7(x)	do {x; } while (0)
+#else				/*  */
+#define DEBUG7(x)	do {} while (0)
+#endif				/*  */
+
 #if defined(QL_DEBUG_LEVEL_9)
 #define DEBUG9(x)	do {x;} while (0);
 #else				/*  */
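
For reference, a minimal standalone sketch of the DEBUG7 pattern added above (printf stands in for ql4_printk here); with QL_DEBUG_LEVEL_7 left undefined the call compiles away, which is why the noisy spurious-interrupt messages are moved to this level:

#include <stdio.h>

/* #define QL_DEBUG_LEVEL_7 */		/* uncomment for the extra diagnostics */

#if defined(QL_DEBUG_LEVEL_7)
#define DEBUG7(x)	do {x; } while (0)
#else
#define DEBUG7(x)	do {} while (0)
#endif

int main(void)
{
	/* printf() stands in for ql4_printk(); in the driver this logs
	 * spurious legacy interrupts, which was too chatty for DEBUG2. */
	DEBUG7(printf("legacy Int not triggered\n"));
	return 0;
}
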
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 129f5dd..ddf16a8 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -159,6 +159,22 @@
 #define LSDW(x) ((u32)((u64)(x)))
 #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
 
+#define DEV_DB_NON_PERSISTENT	0
+#define DEV_DB_PERSISTENT	1
+
+#define COPY_ISID(dst_isid, src_isid) {			\
+	int i, j;					\
+	for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;)	\
+		dst_isid[i++] = src_isid[j--];		\
+}
+
+#define SET_BITVAL(o, n, v) {	\
+	if (o)			\
+		n |= v;		\
+	else			\
+		n &= ~v;	\
+}
+
 /*
  * Retry & Timeout Values
  */
@@ -363,6 +379,8 @@
 	uint32_t flt_iscsi_param;
 	uint32_t flt_region_chap;
 	uint32_t flt_chap_size;
+	uint32_t flt_region_ddb;
+	uint32_t flt_ddb_size;
 };
 
 struct qla4_8xxx_legacy_intr_set {
@@ -501,6 +519,7 @@
 #define AF_INIT_DONE			1 /* 0x00000002 */
 #define AF_MBOX_COMMAND			2 /* 0x00000004 */
 #define AF_MBOX_COMMAND_DONE		3 /* 0x00000008 */
+#define AF_ST_DISCOVERY_IN_PROGRESS	4 /* 0x00000010 */
 #define AF_INTERRUPTS_ON		6 /* 0x00000040 */
 #define AF_GET_CRASH_RECORD		7 /* 0x00000080 */
 #define AF_LINK_UP			8 /* 0x00000100 */
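
A standalone sketch of the two helper macros added above, copied verbatim so it builds outside the driver (an ISID_SIZE of 6 is assumed here); it shows that COPY_ISID reverses the ISID byte order and that SET_BITVAL sets or clears a flag bit depending on the condition:

#include <stdint.h>
#include <stdio.h>

#define ISID_SIZE	6	/* assumed; matches the 6-byte iSCSI ISID */

#define COPY_ISID(dst_isid, src_isid) {			\
	int i, j;					\
	for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;)	\
		dst_isid[i++] = src_isid[j--];		\
}

#define SET_BITVAL(o, n, v) {	\
	if (o)			\
		n |= v;		\
	else			\
		n &= ~v;	\
}

int main(void)
{
	uint8_t fw_isid[ISID_SIZE] = { 0x00, 0x02, 0x3d, 0x00, 0x00, 0x01 };
	uint8_t isid[ISID_SIZE];
	uint16_t options = 0;
	int k;

	COPY_ISID(isid, fw_isid);	/* byte order comes out reversed */
	for (k = 0; k < ISID_SIZE; k++)
		printf("%02x", isid[k]);
	printf("\n");			/* prints 0100003d0200 */

	SET_BITVAL(1, options, 0x0100);	/* condition true: bit is set */
	printf("options=0x%04x\n", options);	/* 0x0100 */
	SET_BITVAL(0, options, 0x0100);	/* condition false: bit is cleared */
	printf("options=0x%04x\n", options);	/* 0x0000 */
	return 0;
}
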
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index ad9d2e2..c7b8892 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -288,6 +288,8 @@
 #define FA_GOLD_RISC_CODE_ADDR_82	0x80000
 #define FA_FLASH_ISCSI_CHAP		0x540000
 #define FA_FLASH_CHAP_SIZE		0xC0000
+#define FA_FLASH_ISCSI_DDB		0x420000
+#define FA_FLASH_DDB_SIZE		0x080000
 
 /* Flash Description Table */
 struct qla_fdt_layout {
@@ -348,6 +350,7 @@
 #define FLT_REG_BOOT_CODE_82	0x78
 #define FLT_REG_ISCSI_PARAM	0x65
 #define FLT_REG_ISCSI_CHAP	0x63
+#define FLT_REG_ISCSI_DDB	0x6A
 
 struct qla_flt_region {
 	uint32_t code;
@@ -490,12 +493,16 @@
 #define MBOX_ASTS_SUBNET_STATE_CHANGE		0x8027
 #define MBOX_ASTS_RESPONSE_QUEUE_FULL		0x8028
 #define MBOX_ASTS_IP_ADDR_STATE_CHANGED		0x8029
+#define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED	0x802A
 #define MBOX_ASTS_IPV6_PREFIX_EXPIRED		0x802B
 #define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED	0x802C
 #define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED	0x802D
 #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD		0x802E
+#define MBOX_ASTS_INITIALIZATION_FAILED		0x8031
+#define MBOX_ASTS_SYSTEM_WARNING_EVENT		0x8036
 #define MBOX_ASTS_IDC_COMPLETE			0x8100
 #define MBOX_ASTS_IDC_REQUEST_NOTIFICATION	0x8101
+#define MBOX_ASTS_DCBX_CONF_CHANGE		0x8110
 #define MBOX_ASTS_TXSCVR_INSERTED		0x8130
 #define MBOX_ASTS_TXSCVR_REMOVED		0x8131
 
@@ -779,12 +786,41 @@
 #define DDB_OPT_IPV6_NULL_LINK_LOCAL		0x800 /* post connection */
 #define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL	0x800 /* pre connection */
 
+#define OPT_IS_FW_ASSIGNED_IPV6		11
+#define OPT_IPV6_DEVICE			8
+#define OPT_AUTO_SENDTGTS_DISABLE	6
+#define OPT_DISC_SESSION		4
+#define OPT_ENTRY_STATE			3
 	uint16_t exec_throttle;	/* 02-03 */
 	uint16_t exec_count;	/* 04-05 */
 	uint16_t res0;	/* 06-07 */
 	uint16_t iscsi_options;	/* 08-09 */
+#define ISCSIOPT_HEADER_DIGEST_EN		13
+#define ISCSIOPT_DATA_DIGEST_EN			12
+#define ISCSIOPT_IMMEDIATE_DATA_EN		11
+#define ISCSIOPT_INITIAL_R2T_EN			10
+#define ISCSIOPT_DATA_SEQ_IN_ORDER		9
+#define ISCSIOPT_DATA_PDU_IN_ORDER		8
+#define ISCSIOPT_CHAP_AUTH_EN			7
+#define ISCSIOPT_SNACK_REQ_EN			6
+#define ISCSIOPT_DISCOVERY_LOGOUT_EN		5
+#define ISCSIOPT_BIDI_CHAP_EN			4
+#define ISCSIOPT_DISCOVERY_AUTH_OPTIONAL	3
+#define ISCSIOPT_ERL1				1
+#define ISCSIOPT_ERL0				0
+
 	uint16_t tcp_options;	/* 0A-0B */
+#define TCPOPT_TIMESTAMP_STAT	6
+#define TCPOPT_NAGLE_DISABLE	5
+#define TCPOPT_WSF_DISABLE	4
+#define TCPOPT_TIMER_SCALE3	3
+#define TCPOPT_TIMER_SCALE2	2
+#define TCPOPT_TIMER_SCALE1	1
+#define TCPOPT_TIMESTAMP_EN	0
+
 	uint16_t ip_options;	/* 0C-0D */
+#define IPOPT_FRAGMENT_DISABLE	4
+
 	uint16_t iscsi_max_rcv_data_seg_len;	/* 0E-0F */
 #define BYTE_UNITS	512
 	uint32_t res1;	/* 10-13 */
@@ -816,6 +852,8 @@
 					 * much RAM */
 	uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
 	uint8_t res5[0x10];	/* 1B0-1BF */
+#define DDB_NO_LINK	0xFFFF
+#define DDB_ISNS	0xFFFD
 	uint16_t ddb_link;	/* 1C0-1C1 */
 	uint16_t chap_tbl_idx;	/* 1C2-1C3 */
 	uint16_t tgt_portal_grp; /* 1C4-1C5 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 982293e..4a42800 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -191,6 +191,9 @@
 int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
 			       uint32_t status, uint32_t pid,
 			       uint32_t data_size, uint8_t *data);
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+			     struct dev_db_entry *fw_ddb_entry,
+			     dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
 
 /* BSG Functions */
 int qla4xxx_bsg_request(struct bsg_job *bsg_job);
@@ -224,8 +227,6 @@
 int qla4_83xx_isp_reset(struct scsi_qla_host *ha);
 void qla4_83xx_queue_iocb(struct scsi_qla_host *ha);
 void qla4_83xx_complete_iocb(struct scsi_qla_host *ha);
-uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha);
-uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha);
 uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr);
 void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val);
 int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
@@ -261,6 +262,10 @@
 void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
 void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
 int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+			    dma_addr_t dma_addr);
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+				  char *password, uint16_t chap_index);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 1b83dc2..482287f 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -396,7 +396,6 @@
 
 	task_data = task->dd_data;
 	memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
-	ha->req_q_count += task_data->iocb_req_cnt;
 	ha->iocb_cnt -= task_data->iocb_req_cnt;
 	queue_work(ha->task_wq, &task_data->task_work);
 }
@@ -416,7 +415,6 @@
 		return mrb;
 
 	/* update counters */
-	ha->req_q_count += mrb->iocb_cnt;
 	ha->iocb_cnt -= mrb->iocb_cnt;
 
 	return mrb;
@@ -877,6 +875,43 @@
 			}
 			break;
 
+		case MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",
+					  ha->host_no, mbox_sts[0]));
+			break;
+
+		case MBOX_ASTS_INITIALIZATION_FAILED:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
+					  ha->host_no, mbox_sts[0],
+					  mbox_sts[3]));
+			break;
+
+		case MBOX_ASTS_SYSTEM_WARNING_EVENT:
+			DEBUG2(ql4_printk(KERN_WARNING, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			break;
+
+		case MBOX_ASTS_DCBX_CONF_CHANGE:
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+					  ha->host_no, mbox_sts[0], mbox_sts[1],
+					  mbox_sts[2], mbox_sts[3], mbox_sts[4],
+					  mbox_sts[5]));
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "scsi%ld: AEN %04x Received DCBX configuration changed notification\n",
+					  ha->host_no, mbox_sts[0]));
+			break;
+
 		default:
 			DEBUG2(printk(KERN_WARNING
 				      "scsi%ld: AEN %04x UNKNOWN\n",
@@ -1099,8 +1134,8 @@
 
 	status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
 	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
-		DEBUG2(ql4_printk(KERN_INFO, ha,
-		    "%s legacy Int not triggered\n", __func__));
+		DEBUG7(ql4_printk(KERN_INFO, ha,
+				  "%s legacy Int not triggered\n", __func__));
 		return IRQ_NONE;
 	}
 
@@ -1158,7 +1193,7 @@
 
 	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
 	if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
-		DEBUG2(ql4_printk(KERN_ERR, ha,
+		DEBUG7(ql4_printk(KERN_ERR, ha,
 				  "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
 				  __func__));
 		return IRQ_NONE;
@@ -1166,7 +1201,7 @@
 
 	/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
 	if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
-		DEBUG2(ql4_printk(KERN_ERR, ha,
+		DEBUG7(ql4_printk(KERN_ERR, ha,
 				  "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
 				  __func__, (leg_int_ptr & PF_BITS_MASK),
 				  ha->pf_bit));
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 160d336..a501bea 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1129,6 +1129,7 @@
 {
 	uint32_t mbox_cmd[MBOX_REG_COUNT];
 	uint32_t mbox_sts[MBOX_REG_COUNT];
+	uint32_t scsi_lun[2];
 	int status = QLA_SUCCESS;
 
 	DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
@@ -1140,10 +1141,16 @@
 	 */
 	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
 	memset(&mbox_sts, 0, sizeof(mbox_sts));
+	int_to_scsilun(lun, (struct scsi_lun *) scsi_lun);
 
 	mbox_cmd[0] = MBOX_CMD_LUN_RESET;
 	mbox_cmd[1] = ddb_entry->fw_ddb_index;
-	mbox_cmd[2] = lun << 8;
+	/* FW expects LUN bytes 0-3 in Incoming Mailbox 2
+	 * (LUN byte 0 is LSByte, byte 3 is MSByte) */
+	mbox_cmd[2] = cpu_to_le32(scsi_lun[0]);
+	/* FW expects LUN bytes 4-7 in Incoming Mailbox 3
+	 * (LUN byte 4 is LSByte, byte 7 is MSByte) */
+	mbox_cmd[3] = cpu_to_le32(scsi_lun[1]);
 	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */
 
 	qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
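
A userspace sketch of the LUN packing the two comments above describe; it assumes the same byte layout that the kernel's int_to_scsilun() helper produces, and shows how LUN bytes 0-3 and 4-7 end up in Incoming Mailbox 2 and 3:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t lun = 5;		/* example LUN */
	uint8_t scsi_lun[8];
	uint32_t mbox2, mbox3;
	int i;

	/* Same SAM LUN byte layout that int_to_scsilun() produces:
	 * 16-bit levels, big-endian within each level. */
	memset(scsi_lun, 0, sizeof(scsi_lun));
	for (i = 0; i < 8; i += 2) {
		scsi_lun[i] = (lun >> 8) & 0xff;
		scsi_lun[i + 1] = lun & 0xff;
		lun >>= 16;
	}

	/* Bytes 0-3 -> Incoming Mailbox 2, bytes 4-7 -> Incoming Mailbox 3;
	 * the driver then applies cpu_to_le32() before writing the mailbox. */
	memcpy(&mbox2, &scsi_lun[0], sizeof(mbox2));
	memcpy(&mbox3, &scsi_lun[4], sizeof(mbox3));
	printf("mbox_cmd[2]=0x%08x mbox_cmd[3]=0x%08x\n",
	       (unsigned)mbox2, (unsigned)mbox3);
	return 0;
}
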
@@ -1281,8 +1288,8 @@
 	return status;
 }
 
-static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
-				   dma_addr_t dma_addr)
+int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
+			    dma_addr_t dma_addr)
 {
 	uint32_t mbox_cmd[MBOX_REG_COUNT];
 	uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -1410,6 +1417,55 @@
 	return status;
 }
 
+int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha,
+			     struct dev_db_entry *fw_ddb_entry,
+			     dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
+{
+	uint32_t dev_db_start_offset;
+	uint32_t dev_db_end_offset;
+	int status = QLA_ERROR;
+
+	memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
+
+	if (is_qla40XX(ha)) {
+		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+		dev_db_end_offset = FLASH_OFFSET_DB_END;
+	} else {
+		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+				      (ha->hw.flt_region_ddb << 2);
+		/* flt_ddb_size is the DDB table size for both ports,
+		 * so divide it by 2 to calculate the offset for the second port
+		 */
+		if (ha->port_num == 1)
+			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+		dev_db_end_offset = dev_db_start_offset +
+				    (ha->hw.flt_ddb_size / 2);
+	}
+
+	dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
+
+	if (dev_db_start_offset > dev_db_end_offset) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s:Invalid DDB index %d", __func__,
+				  ddb_index));
+		goto exit_fdb_failed;
+	}
+
+	if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+			      sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash failed\n",
+			   ha->host_no, __func__);
+		goto exit_fdb_failed;
+	}
+
+	if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
+		status = QLA_SUCCESS;
+
+exit_fdb_failed:
+	return status;
+}
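
A worked sketch of the flash offset arithmetic in qla4xxx_flashdb_by_index() above; FLASH_RAW_ACCESS_ADDR, the region values and the entry size used below are placeholder numbers for illustration, not the real flash layout:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values purely to show the arithmetic; the real numbers come
 * from the FLT (or the FA_FLASH_ISCSI_DDB defaults) and from
 * sizeof(struct dev_db_entry). */
#define FLASH_RAW_ACCESS_ADDR	0x04000000u
#define DDB_ENTRY_SIZE		0x300u

int main(void)
{
	uint32_t flt_region_ddb = 0x108000;	/* word (4-byte) address */
	uint32_t flt_ddb_size = 0x80000;	/* covers both ports */
	uint32_t port_num = 1;
	uint32_t ddb_index = 3;
	uint32_t off;

	off = FLASH_RAW_ACCESS_ADDR + (flt_region_ddb << 2);
	if (port_num == 1)			/* second port gets the upper half */
		off += flt_ddb_size / 2;
	off += ddb_index * DDB_ENTRY_SIZE;

	printf("flash offset for port %u, ddb %u = 0x%08x\n",
	       port_num, ddb_index, off);
	return 0;
}
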
+
 int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
 		     uint16_t idx)
 {
@@ -1503,6 +1559,62 @@
 	return ret;
 }
 
+
+int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
+				  char *password, uint16_t chap_index)
+{
+	int rval = QLA_ERROR;
+	struct ql4_chap_table *chap_table = NULL;
+	int max_chap_entries;
+
+	if (!ha->chap_list) {
+		ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
+		rval = QLA_ERROR;
+		goto exit_uni_chap;
+	}
+
+	if (!username || !password) {
+		ql4_printk(KERN_ERR, ha, "No memory for username & secret\n");
+		rval = QLA_ERROR;
+		goto exit_uni_chap;
+	}
+
+	if (is_qla80XX(ha))
+		max_chap_entries = (ha->hw.flt_chap_size / 2) /
+				   sizeof(struct ql4_chap_table);
+	else
+		max_chap_entries = MAX_CHAP_ENTRIES_40XX;
+
+	if (chap_index > max_chap_entries) {
+		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n");
+		rval = QLA_ERROR;
+		goto exit_uni_chap;
+	}
+
+	mutex_lock(&ha->chap_sem);
+	chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index;
+	if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
+		rval = QLA_ERROR;
+		goto exit_unlock_uni_chap;
+	}
+
+	if (!(chap_table->flags & BIT_6)) {
+		ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");
+		rval = QLA_ERROR;
+		goto exit_unlock_uni_chap;
+	}
+
+	strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
+	strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
+
+	rval = QLA_SUCCESS;
+
+exit_unlock_uni_chap:
+	mutex_unlock(&ha->chap_sem);
+exit_uni_chap:
+	return rval;
+}
+
 /**
  * qla4xxx_get_chap_index - Get chap index given username and secret
  * @ha: pointer to adapter structure
@@ -1524,7 +1636,7 @@
 	int max_chap_entries = 0;
 	struct ql4_chap_table *chap_table;
 
-	if (is_qla8022(ha))
+	if (is_qla80XX(ha))
 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
 						sizeof(struct ql4_chap_table);
 	else
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 9299400..eaf00c1 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -3154,6 +3154,10 @@
 			hw->flt_region_chap =  start;
 			hw->flt_chap_size =  le32_to_cpu(region->size);
 			break;
+		case FLT_REG_ISCSI_DDB:
+			hw->flt_region_ddb =  start;
+			hw->flt_ddb_size =  le32_to_cpu(region->size);
+			break;
 		}
 	}
 	goto done;
@@ -3166,14 +3170,19 @@
 	hw->flt_region_boot     = FA_BOOT_CODE_ADDR_82;
 	hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
 	hw->flt_region_fw       = FA_RISC_CODE_ADDR_82;
-	hw->flt_region_chap	= FA_FLASH_ISCSI_CHAP;
+	hw->flt_region_chap	= FA_FLASH_ISCSI_CHAP >> 2;
 	hw->flt_chap_size	= FA_FLASH_CHAP_SIZE;
+	hw->flt_region_ddb	= FA_FLASH_ISCSI_DDB >> 2;
+	hw->flt_ddb_size	= FA_FLASH_DDB_SIZE;
 
 done:
-	DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x "
-	    "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt,
-	    hw->flt_region_fdt,	hw->flt_region_boot, hw->flt_region_bootload,
-	    hw->flt_region_fw));
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x  ddb_size=0x%x\n",
+			  loc, hw->flt_region_flt, hw->flt_region_fdt,
+			  hw->flt_region_boot, hw->flt_region_bootload,
+			  hw->flt_region_fw, hw->flt_region_chap,
+			  hw->flt_chap_size, hw->flt_region_ddb,
+			  hw->flt_ddb_size));
 }
 
 static void
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 6142729..a47f999 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -166,6 +166,26 @@
 static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
 				      int reason);
 
+/*
+ * iSCSI Flash DDB sysfs entry points
+ */
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+			    struct iscsi_bus_flash_conn *fnode_conn,
+			    void *data, int len);
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+			    int param, char *buf);
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+				 int len);
+static int
+qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess);
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+				   struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+				    struct iscsi_bus_flash_conn *fnode_conn);
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess);
+
 static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
     QLA82XX_LEGACY_INTR_CONFIG;
 
@@ -232,6 +252,13 @@
 	.send_ping		= qla4xxx_send_ping,
 	.get_chap		= qla4xxx_get_chap_list,
 	.delete_chap		= qla4xxx_delete_chap,
+	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,
+	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,
+	.new_flashnode		= qla4xxx_sysfs_ddb_add,
+	.del_flashnode		= qla4xxx_sysfs_ddb_delete,
+	.login_flashnode	= qla4xxx_sysfs_ddb_login,
+	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,
+	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid,
 };
 
 static struct scsi_transport_template *qla4xxx_scsi_transport;
@@ -376,6 +403,68 @@
 		default:
 			return 0;
 		}
+	case ISCSI_FLASHNODE_PARAM:
+		switch (param) {
+		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+		case ISCSI_FLASHNODE_PORTAL_TYPE:
+		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+		case ISCSI_FLASHNODE_DISCOVERY_SESS:
+		case ISCSI_FLASHNODE_ENTRY_EN:
+		case ISCSI_FLASHNODE_HDR_DGST_EN:
+		case ISCSI_FLASHNODE_DATA_DGST_EN:
+		case ISCSI_FLASHNODE_IMM_DATA_EN:
+		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+		case ISCSI_FLASHNODE_DATASEQ_INORDER:
+		case ISCSI_FLASHNODE_PDU_INORDER:
+		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+		case ISCSI_FLASHNODE_SNACK_REQ_EN:
+		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+		case ISCSI_FLASHNODE_ERL:
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+		case ISCSI_FLASHNODE_FIRST_BURST:
+		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+		case ISCSI_FLASHNODE_MAX_R2T:
+		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+		case ISCSI_FLASHNODE_ISID:
+		case ISCSI_FLASHNODE_TSID:
+		case ISCSI_FLASHNODE_PORT:
+		case ISCSI_FLASHNODE_MAX_BURST:
+		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+		case ISCSI_FLASHNODE_IPADDR:
+		case ISCSI_FLASHNODE_ALIAS:
+		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+		case ISCSI_FLASHNODE_LOCAL_PORT:
+		case ISCSI_FLASHNODE_IPV4_TOS:
+		case ISCSI_FLASHNODE_IPV6_TC:
+		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+		case ISCSI_FLASHNODE_NAME:
+		case ISCSI_FLASHNODE_TPGT:
+		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+		case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+		case ISCSI_FLASHNODE_TCP_RECV_WSF:
+		case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+		case ISCSI_FLASHNODE_USERNAME:
+		case ISCSI_FLASHNODE_PASSWORD:
+		case ISCSI_FLASHNODE_STATSN:
+		case ISCSI_FLASHNODE_EXP_STATSN:
+		case ISCSI_FLASHNODE_IS_BOOT_TGT:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
 	}
 
 	return 0;
@@ -391,7 +480,7 @@
 	int valid_chap_entries = 0;
 	int ret = 0, i;
 
-	if (is_qla8022(ha))
+	if (is_qla80XX(ha))
 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
 					sizeof(struct ql4_chap_table);
 	else
@@ -495,7 +584,7 @@
 
 	memset(chap_table, 0, sizeof(struct ql4_chap_table));
 
-	if (is_qla8022(ha))
+	if (is_qla80XX(ha))
 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
 				   sizeof(struct ql4_chap_table);
 	else
@@ -1922,6 +2011,252 @@
 	return -ENOSYS;
 }
 
+static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
+					 struct iscsi_bus_flash_conn *conn,
+					 struct dev_db_entry *fw_ddb_entry)
+{
+	unsigned long options = 0;
+	int rc = 0;
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+	if (test_bit(OPT_IPV6_DEVICE, &options)) {
+		rc = iscsi_switch_str_param(&sess->portal_type,
+					    PORTAL_TYPE_IPV6);
+		if (rc)
+			goto exit_copy;
+	} else {
+		rc = iscsi_switch_str_param(&sess->portal_type,
+					    PORTAL_TYPE_IPV4);
+		if (rc)
+			goto exit_copy;
+	}
+
+	sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+					      &options);
+	sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+	sess->entry_state = test_bit(OPT_ENTRY_STATE, &options);
+
+	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+	conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+	conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+	sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+	sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+	sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+					    &options);
+	sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+	sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+	conn->snack_req_en = test_bit(ISCSIOPT_SNACK_REQ_EN, &options);
+	sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+					     &options);
+	sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+	sess->discovery_auth_optional =
+			test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+	if (test_bit(ISCSIOPT_ERL1, &options))
+		sess->erl |= BIT_1;
+	if (test_bit(ISCSIOPT_ERL0, &options))
+		sess->erl |= BIT_0;
+
+	options = le16_to_cpu(fw_ddb_entry->tcp_options);
+	conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+	conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+	conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+	if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+		conn->tcp_timer_scale |= BIT_3;
+	if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+		conn->tcp_timer_scale |= BIT_2;
+	if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+		conn->tcp_timer_scale |= BIT_1;
+
+	conn->tcp_timer_scale >>= 1;
+	conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+	options = le16_to_cpu(fw_ddb_entry->ip_options);
+	conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+	conn->max_recv_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+	conn->max_xmit_dlength = BYTE_UNITS *
+			  le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+	sess->first_burst = BYTE_UNITS *
+			       le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+	sess->max_burst = BYTE_UNITS *
+				 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+	sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+	sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+	sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+	sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+	conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+	conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+	conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+	conn->ipv6_flow_label = le16_to_cpu(fw_ddb_entry->ipv6_flow_lbl);
+	conn->keepalive_timeout = le16_to_cpu(fw_ddb_entry->ka_timeout);
+	conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+	conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+	conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+	sess->discovery_parent_idx = le16_to_cpu(fw_ddb_entry->ddb_link);
+	sess->discovery_parent_type = le16_to_cpu(fw_ddb_entry->ddb_link);
+	sess->chap_out_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
+	sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+
+	sess->default_taskmgmt_timeout =
+				le16_to_cpu(fw_ddb_entry->def_timeout);
+	conn->port = le16_to_cpu(fw_ddb_entry->port);
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	conn->ipaddress = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+	if (!conn->ipaddress) {
+		rc = -ENOMEM;
+		goto exit_copy;
+	}
+
+	conn->redirect_ipaddr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+	if (!conn->redirect_ipaddr) {
+		rc = -ENOMEM;
+		goto exit_copy;
+	}
+
+	memcpy(conn->ipaddress, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+	memcpy(conn->redirect_ipaddr, fw_ddb_entry->tgt_addr, IPv6_ADDR_LEN);
+
+	if (test_bit(OPT_IPV6_DEVICE, &options)) {
+		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
+
+		conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+		if (!conn->link_local_ipv6_addr) {
+			rc = -ENOMEM;
+			goto exit_copy;
+		}
+
+		memcpy(conn->link_local_ipv6_addr,
+		       fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
+	} else {
+		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+	}
+
+	if (fw_ddb_entry->iscsi_name[0]) {
+		rc = iscsi_switch_str_param(&sess->targetname,
+					    (char *)fw_ddb_entry->iscsi_name);
+		if (rc)
+			goto exit_copy;
+	}
+
+	if (fw_ddb_entry->iscsi_alias[0]) {
+		rc = iscsi_switch_str_param(&sess->targetalias,
+					    (char *)fw_ddb_entry->iscsi_alias);
+		if (rc)
+			goto exit_copy;
+	}
+
+	COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+exit_copy:
+	return rc;
+}
+
+static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
+				       struct iscsi_bus_flash_conn *conn,
+				       struct dev_db_entry *fw_ddb_entry)
+{
+	uint16_t options;
+	int rc = 0;
+
+	options = le16_to_cpu(fw_ddb_entry->options);
+	SET_BITVAL(conn->is_fw_assigned_ipv6,  options, BIT_11);
+	if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		options |= BIT_8;
+	else
+		options &= ~BIT_8;
+
+	SET_BITVAL(sess->auto_snd_tgt_disable, options, BIT_6);
+	SET_BITVAL(sess->discovery_sess, options, BIT_4);
+	SET_BITVAL(sess->entry_state, options, BIT_3);
+	fw_ddb_entry->options = cpu_to_le16(options);
+
+	options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+	SET_BITVAL(conn->hdrdgst_en, options, BIT_13);
+	SET_BITVAL(conn->datadgst_en, options, BIT_12);
+	SET_BITVAL(sess->imm_data_en, options, BIT_11);
+	SET_BITVAL(sess->initial_r2t_en, options, BIT_10);
+	SET_BITVAL(sess->dataseq_inorder_en, options, BIT_9);
+	SET_BITVAL(sess->pdu_inorder_en, options, BIT_8);
+	SET_BITVAL(sess->chap_auth_en, options, BIT_7);
+	SET_BITVAL(conn->snack_req_en, options, BIT_6);
+	SET_BITVAL(sess->discovery_logout_en, options, BIT_5);
+	SET_BITVAL(sess->bidi_chap_en, options, BIT_4);
+	SET_BITVAL(sess->discovery_auth_optional, options, BIT_3);
+	SET_BITVAL(sess->erl & BIT_1, options, BIT_1);
+	SET_BITVAL(sess->erl & BIT_0, options, BIT_0);
+	fw_ddb_entry->iscsi_options = cpu_to_le16(options);
+
+	options = le16_to_cpu(fw_ddb_entry->tcp_options);
+	SET_BITVAL(conn->tcp_timestamp_stat, options, BIT_6);
+	SET_BITVAL(conn->tcp_nagle_disable, options, BIT_5);
+	SET_BITVAL(conn->tcp_wsf_disable, options, BIT_4);
+	SET_BITVAL(conn->tcp_timer_scale & BIT_2, options, BIT_3);
+	SET_BITVAL(conn->tcp_timer_scale & BIT_1, options, BIT_2);
+	SET_BITVAL(conn->tcp_timer_scale & BIT_0, options, BIT_1);
+	SET_BITVAL(conn->tcp_timestamp_en, options, BIT_0);
+	fw_ddb_entry->tcp_options = cpu_to_le16(options);
+
+	options = le16_to_cpu(fw_ddb_entry->ip_options);
+	SET_BITVAL(conn->fragment_disable, options, BIT_4);
+	fw_ddb_entry->ip_options = cpu_to_le16(options);
+
+	fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
+	fw_ddb_entry->iscsi_max_rcv_data_seg_len =
+			       cpu_to_le16(conn->max_recv_dlength / BYTE_UNITS);
+	fw_ddb_entry->iscsi_max_snd_data_seg_len =
+			       cpu_to_le16(conn->max_xmit_dlength / BYTE_UNITS);
+	fw_ddb_entry->iscsi_first_burst_len =
+				cpu_to_le16(sess->first_burst / BYTE_UNITS);
+	fw_ddb_entry->iscsi_max_burst_len = cpu_to_le16(sess->max_burst /
+					    BYTE_UNITS);
+	fw_ddb_entry->iscsi_def_time2wait = cpu_to_le16(sess->time2wait);
+	fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain);
+	fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt);
+	fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
+	fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf);
+	fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf);
+	fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
+	fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
+	fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
+	fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
+	fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn);
+	fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn);
+	fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
+	fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
+	fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
+	fw_ddb_entry->port = cpu_to_le16(conn->port);
+	fw_ddb_entry->def_timeout =
+				cpu_to_le16(sess->default_taskmgmt_timeout);
+
+	if (conn->ipaddress)
+		memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
+		       sizeof(fw_ddb_entry->ip_addr));
+
+	if (conn->redirect_ipaddr)
+		memcpy(fw_ddb_entry->tgt_addr, conn->redirect_ipaddr,
+		       sizeof(fw_ddb_entry->tgt_addr));
+
+	if (conn->link_local_ipv6_addr)
+		memcpy(fw_ddb_entry->link_local_ipv6_addr,
+		       conn->link_local_ipv6_addr,
+		       sizeof(fw_ddb_entry->link_local_ipv6_addr));
+
+	if (sess->targetname)
+		memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
+		       sizeof(fw_ddb_entry->iscsi_name));
+
+	if (sess->targetalias)
+		memcpy(fw_ddb_entry->iscsi_alias, sess->targetalias,
+		       sizeof(fw_ddb_entry->iscsi_alias));
+
+	COPY_ISID(fw_ddb_entry->isid, sess->isid);
+
+	return rc;
+}
+
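
The tcp_timer_scale handling in the two copy helpers above is easy to misread: the firmware keeps the 3-bit scale in bits 3..1 of tcp_options, while the flashnode attribute stores it right-aligned in bits 2..0. A small sketch of that round trip, assuming only the BIT_x values shown:

#include <stdint.h>
#include <stdio.h>

#define BIT_0	0x1
#define BIT_1	0x2
#define BIT_2	0x4
#define BIT_3	0x8

int main(void)
{
	uint16_t tcp_options = 0x000A;	/* firmware word: scale bits 3..1 = 101b */
	uint8_t tcp_timer_scale = 0;
	uint16_t out = 0;

	/* copy_from direction: collect bits 3..1, then right-align them. */
	if (tcp_options & BIT_3)
		tcp_timer_scale |= BIT_3;
	if (tcp_options & BIT_2)
		tcp_timer_scale |= BIT_2;
	if (tcp_options & BIT_1)
		tcp_timer_scale |= BIT_1;
	tcp_timer_scale >>= 1;
	printf("flashnode tcp_timer_scale = %u\n", tcp_timer_scale);	/* 5 */

	/* copy_to direction: map bits 2..0 back up to bits 3..1. */
	if (tcp_timer_scale & BIT_2)
		out |= BIT_3;
	if (tcp_timer_scale & BIT_1)
		out |= BIT_2;
	if (tcp_timer_scale & BIT_0)
		out |= BIT_1;
	printf("firmware tcp_options bits = 0x%04x\n", out);		/* 0x000a */
	return 0;
}
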
 static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
 				     struct dev_db_entry *fw_ddb_entry,
 				     struct iscsi_cls_session *cls_sess,
@@ -2543,6 +2878,7 @@
 void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
 {
 	uint32_t dev_state;
+	uint32_t idc_ctrl;
 
 	/* don't poll if reset is going on */
 	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
@@ -2561,10 +2897,23 @@
 			qla4xxx_wake_dpc(ha);
 		} else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
 			   !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+
+			ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
+				   __func__);
+
+			if (is_qla8032(ha)) {
+				idc_ctrl = qla4_83xx_rd_reg(ha,
+							QLA83XX_IDC_DRV_CTRL);
+				if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
+					ql4_printk(KERN_INFO, ha, "%s: Graceful reset bit is not set\n",
+						   __func__);
+					qla4xxx_mailbox_premature_completion(
+									    ha);
+				}
+			}
+
 			if (is_qla8032(ha) ||
 			    (is_qla8022(ha) && !ql4xdontresethba)) {
-				ql4_printk(KERN_INFO, ha, "%s: HW State: "
-				    "NEED RESET!\n", __func__);
 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
 				qla4xxx_wake_dpc(ha);
 			}
@@ -3737,8 +4086,8 @@
 	.reset_firmware		= qla4_8xxx_stop_firmware,
 	.queue_iocb		= qla4_83xx_queue_iocb,
 	.complete_iocb		= qla4_83xx_complete_iocb,
-	.rd_shdw_req_q_out	= qla4_83xx_rd_shdw_req_q_out,
-	.rd_shdw_rsp_q_in	= qla4_83xx_rd_shdw_rsp_q_in,
+	.rd_shdw_req_q_out	= qla4xxx_rd_shdw_req_q_out,
+	.rd_shdw_rsp_q_in	= qla4xxx_rd_shdw_rsp_q_in,
 	.get_sys_info		= qla4_8xxx_get_sys_info,
 	.rd_reg_direct		= qla4_83xx_rd_reg,
 	.wr_reg_direct		= qla4_83xx_wr_reg,
@@ -3761,11 +4110,6 @@
 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->req_q_out));
 }
 
-uint16_t qla4_83xx_rd_shdw_req_q_out(struct scsi_qla_host *ha)
-{
-	return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->req_q_out));
-}
-
 uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
 {
 	return (uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in);
@@ -3776,11 +4120,6 @@
 	return (uint16_t)le32_to_cpu(readl(&ha->qla4_82xx_reg->rsp_q_in));
 }
 
-uint16_t qla4_83xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
-{
-	return (uint16_t)le32_to_cpu(readl(&ha->qla4_83xx_reg->rsp_q_in));
-}
-
 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
 {
 	struct scsi_qla_host *ha = data;
@@ -4005,7 +4344,7 @@
 		if (val & BIT_7)
 			ddb_index[1] = (val & 0x7f);
 
-	} else if (is_qla8022(ha)) {
+	} else if (is_qla80XX(ha)) {
 		buf = dma_alloc_coherent(&ha->pdev->dev, size,
 					 &buf_dma, GFP_KERNEL);
 		if (!buf) {
@@ -4083,7 +4422,7 @@
 	int max_chap_entries = 0;
 	struct ql4_chap_table *chap_table;
 
-	if (is_qla8022(ha))
+	if (is_qla80XX(ha))
 		max_chap_entries = (ha->hw.flt_chap_size / 2) /
 						sizeof(struct ql4_chap_table);
 	else
@@ -5058,6 +5397,1342 @@
 		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
 }
 
+static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,
+				      struct list_head *list_nt)
+{
+	struct dev_db_entry *fw_ddb_entry;
+	dma_addr_t fw_ddb_dma;
+	int max_ddbs;
+	int fw_idx_size;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t conn_id = 0;
+	struct qla_ddb_index  *nt_ddb_idx;
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+		goto exit_new_nt_list;
+	}
+	max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+				     MAX_DEV_DB_ENTRIES;
+	fw_idx_size = sizeof(struct qla_ddb_index);
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+					      NULL, &next_idx, &state,
+					      &conn_err, NULL, &conn_id);
+		if (ret == QLA_ERROR)
+			break;
+
+		/* Check if NT, then add it to list */
+		if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+			goto continue_next_new_nt;
+
+		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE))
+			goto continue_next_new_nt;
+
+		DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "Adding  DDB to session = 0x%x\n", idx));
+
+		nt_ddb_idx = vmalloc(fw_idx_size);
+		if (!nt_ddb_idx)
+			break;
+
+		nt_ddb_idx->fw_ddb_idx = idx;
+
+		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+		if (ret == QLA_SUCCESS) {
+			/* free nt_ddb_idx and do not add to list_nt */
+			vfree(nt_ddb_idx);
+			goto continue_next_new_nt;
+		}
+
+		list_add_tail(&nt_ddb_idx->list, list_nt);
+
+		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+					      idx);
+		if (ret == QLA_ERROR)
+			goto exit_new_nt_list;
+
+continue_next_new_nt:
+		if (next_idx == 0)
+			break;
+	}
+
+exit_new_nt_list:
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_is_non_persistent - check for non-persistence of ddb entry
+ * @dev: dev associated with the sysfs entry
+ * @data: pointer to flashnode session object
+ *
+ * Returns:
+ *	1: if flashnode entry is non-persistent
+ *	0: if flashnode entry is persistent
+ **/
+static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data)
+{
+	struct iscsi_bus_flash_session *fnode_sess;
+
+	if (!iscsi_flashnode_bus_match(dev, NULL))
+		return 0;
+
+	fnode_sess = iscsi_dev_to_flash_session(dev);
+
+	return (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT);
+}
+
+/**
+ * qla4xxx_sysfs_ddb_tgt_create - Create sysfs entry for target
+ * @ha: pointer to host
+ * @fw_ddb_entry: flash ddb data
+ * @idx: target index
+ * @user: if set then this call is made from userland else from kernel
+ *
+ * Returns:
+ * On success: QLA_SUCCESS
+ * On failure: QLA_ERROR
+ *
+ * This creates separate sysfs entries for the session and connection
+ * attributes of the given fw ddb entry.
+ * If this is invoked as a result of a userspace call, the entry is marked as
+ * non-persistent using the flash_state field.
+ **/
+int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
+				 struct dev_db_entry *fw_ddb_entry,
+				 uint16_t *idx, int user)
+{
+	struct iscsi_bus_flash_session *fnode_sess = NULL;
+	struct iscsi_bus_flash_conn *fnode_conn = NULL;
+	int rc = QLA_ERROR;
+
+	fnode_sess = iscsi_create_flashnode_sess(ha->host, *idx,
+						 &qla4xxx_iscsi_transport, 0);
+	if (!fnode_sess) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n",
+			   __func__, *idx, ha->host_no);
+		goto exit_tgt_create;
+	}
+
+	fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess,
+						 &qla4xxx_iscsi_transport, 0);
+	if (!fnode_conn) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n",
+			   __func__, *idx, ha->host_no);
+		goto free_sess;
+	}
+
+	if (user) {
+		fnode_sess->flash_state = DEV_DB_NON_PERSISTENT;
+	} else {
+		fnode_sess->flash_state = DEV_DB_PERSISTENT;
+
+		if (*idx == ha->pri_ddb_idx || *idx == ha->sec_ddb_idx)
+			fnode_sess->is_boot_target = 1;
+		else
+			fnode_sess->is_boot_target = 0;
+	}
+
+	rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
+					   fw_ddb_entry);
+
+	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+		   __func__, fnode_sess->dev.kobj.name);
+
+	ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
+		   __func__, fnode_conn->dev.kobj.name);
+
+	return QLA_SUCCESS;
+
+free_sess:
+	iscsi_destroy_flashnode_sess(fnode_sess);
+
+exit_tgt_create:
+	return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_add - Add new ddb entry in flash
+ * @shost: pointer to host
+ * @buf: type of ddb entry (ipv4/ipv6)
+ * @len: length of buf
+ *
+ * This creates a new ddb entry in flash by finding the first free index and
+ * storing the default ddb there, and then creates a sysfs entry for the new
+ * ddb entry.
+ **/
+static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
+				 int len)
+{
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	struct device *dev;
+	uint16_t idx = 0;
+	uint16_t max_ddbs = 0;
+	uint32_t options = 0;
+	uint32_t rval = QLA_ERROR;
+
+	if (strncasecmp(PORTAL_TYPE_IPV4, buf, 4) &&
+	    strncasecmp(PORTAL_TYPE_IPV6, buf, 4)) {
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Invalid portal type\n",
+				  __func__));
+		goto exit_ddb_add;
+	}
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+				     MAX_DEV_DB_ENTRIES;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		goto exit_ddb_add;
+	}
+
+	dev = iscsi_find_flashnode_sess(ha->host, NULL,
+					qla4xxx_sysfs_ddb_is_non_persistent);
+	if (dev) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: A non-persistent entry %s found\n",
+			   __func__, dev->kobj.name);
+		goto exit_ddb_add;
+	}
+
+	for (idx = 0; idx < max_ddbs; idx++) {
+		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
+					     fw_ddb_entry_dma, idx))
+			break;
+	}
+
+	if (idx == max_ddbs)
+		goto exit_ddb_add;
+
+	if (!strncasecmp("ipv6", buf, 4))
+		options |= IPV6_DEFAULT_DDB_ENTRY;
+
+	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+	if (rval == QLA_ERROR)
+		goto exit_ddb_add;
+
+	rval = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 1);
+
+exit_ddb_add:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	if (rval == QLA_SUCCESS)
+		return idx;
+	else
+		return -EIO;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_apply - write the target ddb contents to Flash
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This writes the contents of the target ddb buffer to flash with a valid
+ * cookie value in order to make the ddb entry persistent.
+ **/
+static int  qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess,
+				    struct iscsi_bus_flash_conn *fnode_conn)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t options = 0;
+	int rval = 0;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		rval = -ENOMEM;
+		goto exit_ddb_apply;
+	}
+
+	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		options |= IPV6_DEFAULT_DDB_ENTRY;
+
+	rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+	if (rval == QLA_ERROR)
+		goto exit_ddb_apply;
+
+	dev_db_start_offset += (fnode_sess->target_id *
+				sizeof(*fw_ddb_entry));
+
+	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+	rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+				 sizeof(*fw_ddb_entry), FLASH_OPT_RMW_COMMIT);
+
+	if (rval == QLA_SUCCESS) {
+		fnode_sess->flash_state = DEV_DB_PERSISTENT;
+		ql4_printk(KERN_INFO, ha,
+			   "%s: flash node %u of host %lu written to flash\n",
+			   __func__, fnode_sess->target_id, ha->host_no);
+	} else {
+		rval = -EIO;
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Error while writing flash node %u of host %lu to flash\n",
+			   __func__, fnode_sess->target_id, ha->host_no);
+	}
+
+exit_ddb_apply:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	return rval;
+}
+
+static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha,
+					   struct dev_db_entry *fw_ddb_entry,
+					   uint16_t idx)
+{
+	struct dev_db_entry *ddb_entry = NULL;
+	dma_addr_t ddb_entry_dma;
+	unsigned long wtime;
+	uint32_t mbx_sts = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t tmo = 0;
+	int ret = 0;
+
+	ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+				       &ddb_entry_dma, GFP_KERNEL);
+	if (!ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		return QLA_ERROR;
+	}
+
+	memcpy(ddb_entry, fw_ddb_entry, sizeof(*ddb_entry));
+
+	ret = qla4xxx_set_ddb_entry(ha, idx, ddb_entry_dma, &mbx_sts);
+	if (ret != QLA_SUCCESS) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to set ddb entry for index %d\n",
+				  __func__, idx));
+		goto exit_ddb_conn_open;
+	}
+
+	qla4xxx_conn_open(ha, idx);
+
+	/* To ensure that sendtargets is done, wait for at least 12 secs */
+	tmo = ((ha->def_timeout > LOGIN_TOV) &&
+	       (ha->def_timeout < LOGIN_TOV * 10) ?
+	       ha->def_timeout : LOGIN_TOV);
+
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Default time to wait for login to ddb %d\n", tmo));
+
+	wtime = jiffies + (HZ * tmo);
+	do {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+					      NULL, &state, &conn_err, NULL,
+					      NULL);
+		if (ret == QLA_ERROR)
+			continue;
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED)
+			break;
+
+		schedule_timeout_uninterruptible(HZ / 10);
+	} while (time_after(wtime, jiffies));
+
+exit_ddb_conn_open:
+	if (ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*ddb_entry),
+				  ddb_entry, ddb_entry_dma);
+	return ret;
+}
+
+static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,
+				struct dev_db_entry *fw_ddb_entry)
+{
+	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+	struct list_head list_nt;
+	uint16_t ddb_index;
+	int ret = 0;
+
+	if (test_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags)) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s: A discovery already in progress!\n", __func__);
+		return QLA_ERROR;
+	}
+
+	INIT_LIST_HEAD(&list_nt);
+
+	set_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+
+	ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+	if (ret == QLA_ERROR)
+		goto exit_login_st_clr_bit;
+
+	ret = qla4xxx_sysfs_ddb_conn_open(ha, fw_ddb_entry, ddb_index);
+	if (ret == QLA_ERROR)
+		goto exit_login_st;
+
+	qla4xxx_build_new_nt_list(ha, &list_nt);
+
+	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {
+		list_del_init(&ddb_idx->list);
+		qla4xxx_clear_ddb_entry(ha, ddb_idx->fw_ddb_idx);
+		vfree(ddb_idx);
+	}
+
+exit_login_st:
+	if (qla4xxx_clear_ddb_entry(ha, ddb_index) == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha,
+			   "Unable to clear DDB index = 0x%x\n", ddb_index);
+	}
+
+	clear_bit(ddb_index, ha->ddb_idx_map);
+
+exit_login_st_clr_bit:
+	clear_bit(AF_ST_DISCOVERY_IN_PROGRESS, &ha->flags);
+	return ret;
+}
+
+static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,
+				struct dev_db_entry *fw_ddb_entry,
+				uint16_t idx)
+{
+	int ret = QLA_ERROR;
+
+	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry);
+	if (ret != QLA_SUCCESS)
+		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,
+					      idx);
+	else
+		ret = -EPERM;
+
+	return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_login - Login to the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This logs in to the specified target
+ **/
+static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,
+				   struct iscsi_bus_flash_conn *fnode_conn)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint32_t options = 0;
+	int ret = 0;
+
+	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Target info is not persistent\n", __func__);
+		ret = -EIO;
+		goto exit_ddb_login;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		ret = -ENOMEM;
+		goto exit_ddb_login;
+	}
+
+	if (!strncasecmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		options |= IPV6_DEFAULT_DDB_ENTRY;
+
+	ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
+	if (ret == QLA_ERROR)
+		goto exit_ddb_login;
+
+	qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry);
+	fw_ddb_entry->cookie = DDB_VALID_COOKIE;
+
+	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0)
+		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry);
+	else
+		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,
+					   fnode_sess->target_id);
+
+	if (ret > 0)
+		ret = -EIO;
+
+exit_ddb_login:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout_sid - Logout session for the specified target
+ * @cls_sess: pointer to session to be logged out
+ *
+ * This performs a session logout from the specified target
+ **/
+static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
+{
+	struct iscsi_session *sess;
+	struct ddb_entry *ddb_entry = NULL;
+	struct scsi_qla_host *ha;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	unsigned long flags;
+	unsigned long wtime;
+	uint32_t ddb_state;
+	int options;
+	int ret = 0;
+
+	sess = cls_sess->dd_data;
+	ddb_entry = sess->dd_data;
+	ha = ddb_entry->ha;
+
+	if (ddb_entry->ddb_type != FLASH_DDB) {
+		ql4_printk(KERN_ERR, ha, "%s: Not a flash node session\n",
+			   __func__);
+		ret = -ENXIO;
+		goto exit_ddb_logout;
+	}
+
+	if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Logout from boot target entry is not permitted.\n",
+			   __func__);
+		ret = -EPERM;
+		goto exit_ddb_logout;
+	}
+
+	options = LOGOUT_OPTION_CLOSE_SESSION;
+	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
+		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
+		ret = -EIO;
+		goto exit_ddb_logout;
+	}
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Unable to allocate dma buffer\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
+	wtime = jiffies + (HZ * LOGOUT_TOV);
+	do {
+		ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+					      fw_ddb_entry, fw_ddb_entry_dma,
+					      NULL, NULL, &ddb_state, NULL,
+					      NULL, NULL);
+		if (ret == QLA_ERROR)
+			goto ddb_logout_clr_sess;
+
+		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+		    (ddb_state == DDB_DS_SESSION_FAILED))
+			goto ddb_logout_clr_sess;
+
+		schedule_timeout_uninterruptible(HZ);
+	} while ((time_after(wtime, jiffies)));
+
+ddb_logout_clr_sess:
+	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+	/*
+	 * We decremented the driver's reference count when we set up the
+	 * session, so that driver unload is seamless without actually
+	 * destroying the session.
+	 */
+	try_module_get(qla4xxx_iscsi_transport.owner);
+	iscsi_destroy_endpoint(ddb_entry->conn->ep);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	qla4xxx_free_ddb(ha, ddb_entry);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	iscsi_session_teardown(ddb_entry->sess);
+
+	ret = QLA_SUCCESS;
+
+exit_ddb_logout:
+	if (fw_ddb_entry)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+				  fw_ddb_entry, fw_ddb_entry_dma);
+	return ret;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_logout - Logout from the specified target
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ *
+ * This performs a logout from the specified target
+ **/
+static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
+				    struct iscsi_bus_flash_conn *fnode_conn)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct ql4_tuple_ddb *flash_tddb = NULL;
+	struct ql4_tuple_ddb *tmp_tddb = NULL;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	struct ddb_entry *ddb_entry = NULL;
+	dma_addr_t fw_ddb_dma;
+	uint32_t next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+	uint16_t conn_id = 0;
+	int idx, index;
+	int status, ret = 0;
+
+	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+				      &fw_ddb_dma);
+	if (fw_ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s:Out of memory\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
+	flash_tddb = vzalloc(sizeof(*flash_tddb));
+	if (!flash_tddb) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s:Memory Allocation failed.\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
+	tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+	if (!tmp_tddb) {
+		ql4_printk(KERN_WARNING, ha,
+			   "%s:Memory Allocation failed.\n", __func__);
+		ret = -ENOMEM;
+		goto exit_ddb_logout;
+	}
+
+	if (!fnode_sess->targetname) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s:Cannot logout from SendTarget entry\n",
+			   __func__);
+		ret = -EPERM;
+		goto exit_ddb_logout;
+	}
+
+	if (fnode_sess->is_boot_target) {
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Logout from boot target entry is not permitted.\n",
+			   __func__);
+		ret = -EPERM;
+		goto exit_ddb_logout;
+	}
+
+	strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
+		ISCSI_NAME_SIZE);
+
+	if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+		sprintf(flash_tddb->ip_addr, "%pI6", fnode_conn->ipaddress);
+	else
+		sprintf(flash_tddb->ip_addr, "%pI4", fnode_conn->ipaddress);
+
+	flash_tddb->tpgt = fnode_sess->tpgt;
+	flash_tddb->port = fnode_conn->port;
+
+	COPY_ISID(flash_tddb->isid, fnode_sess->isid);
+
+	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+		ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+		if (ddb_entry == NULL)
+			continue;
+
+		if (ddb_entry->ddb_type != FLASH_DDB)
+			continue;
+
+		index = ddb_entry->sess->target_id;
+		status = qla4xxx_get_fwddb_entry(ha, index, fw_ddb_entry,
+						 fw_ddb_dma, NULL, &next_idx,
+						 &state, &conn_err, NULL,
+						 &conn_id);
+		if (status == QLA_ERROR) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, NULL);
+
+		status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb,
+						   true);
+		if (status == QLA_SUCCESS) {
+			ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess);
+			break;
+		}
+	}
+
+	if (idx == MAX_DDB_ENTRIES)
+		ret = -ESRCH;
+
+exit_ddb_logout:
+	if (flash_tddb)
+		vfree(flash_tddb);
+	if (tmp_tddb)
+		vfree(tmp_tddb);
+	if (fw_ddb_entry)
+		dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+	return ret;
+}
+
+static int
+qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
+			    int param, char *buf)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct iscsi_bus_flash_conn *fnode_conn;
+	struct ql4_chap_table chap_tbl;
+	struct device *dev;
+	int parent_type, parent_index = 0xffff;
+	int rc = 0;
+
+	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+					iscsi_is_flashnode_conn_dev);
+	if (!dev)
+		return -EIO;
+
+	fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+	switch (param) {
+	case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+		rc = sprintf(buf, "%u\n", fnode_conn->is_fw_assigned_ipv6);
+		break;
+	case ISCSI_FLASHNODE_PORTAL_TYPE:
+		rc = sprintf(buf, "%s\n", fnode_sess->portal_type);
+		break;
+	case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_sess->auto_snd_tgt_disable);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_SESS:
+		rc = sprintf(buf, "%u\n", fnode_sess->discovery_sess);
+		break;
+	case ISCSI_FLASHNODE_ENTRY_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->entry_state);
+		break;
+	case ISCSI_FLASHNODE_HDR_DGST_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en);
+		break;
+	case ISCSI_FLASHNODE_DATA_DGST_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en);
+		break;
+	case ISCSI_FLASHNODE_IMM_DATA_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en);
+		break;
+	case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en);
+		break;
+	case ISCSI_FLASHNODE_DATASEQ_INORDER:
+		rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en);
+		break;
+	case ISCSI_FLASHNODE_PDU_INORDER:
+		rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en);
+		break;
+	case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->chap_auth_en);
+		break;
+	case ISCSI_FLASHNODE_SNACK_REQ_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->snack_req_en);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->discovery_logout_en);
+		break;
+	case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+		rc = sprintf(buf, "%u\n", fnode_sess->bidi_chap_en);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+		rc = sprintf(buf, "%u\n", fnode_sess->discovery_auth_optional);
+		break;
+	case ISCSI_FLASHNODE_ERL:
+		rc = sprintf(buf, "%u\n", fnode_sess->erl);
+		break;
+	case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_stat);
+		break;
+	case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_nagle_disable);
+		break;
+	case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_wsf_disable);
+		break;
+	case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timer_scale);
+		break;
+	case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_timestamp_en);
+		break;
+	case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+		rc = sprintf(buf, "%u\n", fnode_conn->fragment_disable);
+		break;
+	case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+		rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength);
+		break;
+	case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+		rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength);
+		break;
+	case ISCSI_FLASHNODE_FIRST_BURST:
+		rc = sprintf(buf, "%u\n", fnode_sess->first_burst);
+		break;
+	case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+		rc = sprintf(buf, "%u\n", fnode_sess->time2wait);
+		break;
+	case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+		rc = sprintf(buf, "%u\n", fnode_sess->time2retain);
+		break;
+	case ISCSI_FLASHNODE_MAX_R2T:
+		rc = sprintf(buf, "%u\n", fnode_sess->max_r2t);
+		break;
+	case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+		rc = sprintf(buf, "%u\n", fnode_conn->keepalive_timeout);
+		break;
+	case ISCSI_FLASHNODE_ISID:
+		rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+			     fnode_sess->isid[0], fnode_sess->isid[1],
+			     fnode_sess->isid[2], fnode_sess->isid[3],
+			     fnode_sess->isid[4], fnode_sess->isid[5]);
+		break;
+	case ISCSI_FLASHNODE_TSID:
+		rc = sprintf(buf, "%u\n", fnode_sess->tsid);
+		break;
+	case ISCSI_FLASHNODE_PORT:
+		rc = sprintf(buf, "%d\n", fnode_conn->port);
+		break;
+	case ISCSI_FLASHNODE_MAX_BURST:
+		rc = sprintf(buf, "%u\n", fnode_sess->max_burst);
+		break;
+	case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+		rc = sprintf(buf, "%u\n",
+			     fnode_sess->default_taskmgmt_timeout);
+		break;
+	case ISCSI_FLASHNODE_IPADDR:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress);
+		else
+			rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress);
+		break;
+	case ISCSI_FLASHNODE_ALIAS:
+		if (fnode_sess->targetalias)
+			rc = sprintf(buf, "%s\n", fnode_sess->targetalias);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%pI6\n",
+				     fnode_conn->redirect_ipaddr);
+		else
+			rc = sprintf(buf, "%pI4\n",
+				     fnode_conn->redirect_ipaddr);
+		break;
+	case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+		rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size);
+		break;
+	case ISCSI_FLASHNODE_LOCAL_PORT:
+		rc = sprintf(buf, "%u\n", fnode_conn->local_port);
+		break;
+	case ISCSI_FLASHNODE_IPV4_TOS:
+		rc = sprintf(buf, "%u\n", fnode_conn->ipv4_tos);
+		break;
+	case ISCSI_FLASHNODE_IPV6_TC:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%u\n",
+				     fnode_conn->ipv6_traffic_class);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+		rc = sprintf(buf, "%u\n", fnode_conn->ipv6_flow_label);
+		break;
+	case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+		if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
+			rc = sprintf(buf, "%pI6\n",
+				     fnode_conn->link_local_ipv6_addr);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+		if ((fnode_sess->discovery_parent_idx) >= 0  &&
+		    (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES))
+			parent_index = fnode_sess->discovery_parent_idx;
+
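+		/* parent_index keeps its 0xffff default when no valid
+		 * discovery parent index is recorded for this entry.
+		 */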
+		rc = sprintf(buf, "%u\n", parent_index);
+		break;
+	case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+		if (fnode_sess->discovery_parent_type == DDB_ISNS)
+			parent_type = ISCSI_DISC_PARENT_ISNS;
+		else if (fnode_sess->discovery_parent_type == DDB_NO_LINK)
+			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+		else if (fnode_sess->discovery_parent_type >= 0  &&
+			 fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES)
+			parent_type = ISCSI_DISC_PARENT_SENDTGT;
+		else
+			parent_type = ISCSI_DISC_PARENT_UNKNOWN;
+
+		rc = sprintf(buf, "%s\n",
+			     iscsi_get_discovery_parent_name(parent_type));
+		break;
+	case ISCSI_FLASHNODE_NAME:
+		if (fnode_sess->targetname)
+			rc = sprintf(buf, "%s\n", fnode_sess->targetname);
+		else
+			rc = sprintf(buf, "\n");
+		break;
+	case ISCSI_FLASHNODE_TPGT:
+		rc = sprintf(buf, "%u\n", fnode_sess->tpgt);
+		break;
+	case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf);
+		break;
+	case ISCSI_FLASHNODE_TCP_RECV_WSF:
+		rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf);
+		break;
+	case ISCSI_FLASHNODE_CHAP_OUT_IDX:
+		rc = sprintf(buf, "%u\n", fnode_sess->chap_out_idx);
+		break;
+	case ISCSI_FLASHNODE_USERNAME:
+		if (fnode_sess->chap_auth_en) {
+			qla4xxx_get_uni_chap_at_index(ha,
+						      chap_tbl.name,
+						      chap_tbl.secret,
+						      fnode_sess->chap_out_idx);
+			rc = sprintf(buf, "%s\n", chap_tbl.name);
+		} else {
+			rc = sprintf(buf, "\n");
+		}
+		break;
+	case ISCSI_FLASHNODE_PASSWORD:
+		if (fnode_sess->chap_auth_en) {
+			qla4xxx_get_uni_chap_at_index(ha,
+						      chap_tbl.name,
+						      chap_tbl.secret,
+						      fnode_sess->chap_out_idx);
+			rc = sprintf(buf, "%s\n", chap_tbl.secret);
+		} else {
+			rc = sprintf(buf, "\n");
+		}
+		break;
+	case ISCSI_FLASHNODE_STATSN:
+		rc = sprintf(buf, "%u\n", fnode_conn->statsn);
+		break;
+	case ISCSI_FLASHNODE_EXP_STATSN:
+		rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn);
+		break;
+	case ISCSI_FLASHNODE_IS_BOOT_TGT:
+		rc = sprintf(buf, "%u\n", fnode_sess->is_boot_target);
+		break;
+	default:
+		rc = -ENOSYS;
+		break;
+	}
+	return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_set_param - Set parameter for firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ * @fnode_conn: pointer to connection attrs of flash ddb entry
+ * @data: Parameters and their values to update
+ * @len: length of data
+ *
+ * This sets the parameters of the flash ddb entry and writes them to flash
+ **/
+static int
+qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
+			    struct iscsi_bus_flash_conn *fnode_conn,
+			    void *data, int len)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	struct iscsi_flashnode_param_info *fnode_param;
+	struct nlattr *attr;
+	int rc = QLA_ERROR;
+	uint32_t rem = len;
+
+	fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate ddb buffer\n",
+				  __func__));
+		return -ENOMEM;
+	}
+
+	nla_for_each_attr(attr, data, len, rem) {
+		fnode_param = nla_data(attr);
+
+		switch (fnode_param->param) {
+		case ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6:
+			fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_PORTAL_TYPE:
+			memcpy(fnode_sess->portal_type, fnode_param->value,
+			       strlen(fnode_sess->portal_type));
+			break;
+		case ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE:
+			fnode_sess->auto_snd_tgt_disable =
+							fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_SESS:
+			fnode_sess->discovery_sess = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_ENTRY_EN:
+			fnode_sess->entry_state = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_HDR_DGST_EN:
+			fnode_conn->hdrdgst_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DATA_DGST_EN:
+			fnode_conn->datadgst_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IMM_DATA_EN:
+			fnode_sess->imm_data_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_INITIAL_R2T_EN:
+			fnode_sess->initial_r2t_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DATASEQ_INORDER:
+			fnode_sess->dataseq_inorder_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_PDU_INORDER:
+			fnode_sess->pdu_inorder_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_CHAP_AUTH_EN:
+			fnode_sess->chap_auth_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_SNACK_REQ_EN:
+			fnode_conn->snack_req_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN:
+			fnode_sess->discovery_logout_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_BIDI_CHAP_EN:
+			fnode_sess->bidi_chap_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL:
+			fnode_sess->discovery_auth_optional =
+							fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_ERL:
+			fnode_sess->erl = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT:
+			fnode_conn->tcp_timestamp_stat = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_NAGLE_DISABLE:
+			fnode_conn->tcp_nagle_disable = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_WSF_DISABLE:
+			fnode_conn->tcp_wsf_disable = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_TIMER_SCALE:
+			fnode_conn->tcp_timer_scale = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_TCP_TIMESTAMP_EN:
+			fnode_conn->tcp_timestamp_en = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IP_FRAG_DISABLE:
+			fnode_conn->fragment_disable = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_MAX_RECV_DLENGTH:
+			fnode_conn->max_recv_dlength =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_MAX_XMIT_DLENGTH:
+			fnode_conn->max_xmit_dlength =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_FIRST_BURST:
+			fnode_sess->first_burst =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_DEF_TIME2WAIT:
+			fnode_sess->time2wait = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_DEF_TIME2RETAIN:
+			fnode_sess->time2retain =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_MAX_R2T:
+			fnode_sess->max_r2t =
+					*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_KEEPALIVE_TMO:
+			fnode_conn->keepalive_timeout =
+				*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_ISID:
+			memcpy(fnode_sess->isid, fnode_param->value,
+			       sizeof(fnode_sess->isid));
+			break;
+		case ISCSI_FLASHNODE_TSID:
+			fnode_sess->tsid = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_PORT:
+			fnode_conn->port = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_MAX_BURST:
+			fnode_sess->max_burst = *(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_DEF_TASKMGMT_TMO:
+			fnode_sess->default_taskmgmt_timeout =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_IPADDR:
+			memcpy(fnode_conn->ipaddress, fnode_param->value,
+			       IPv6_ADDR_LEN);
+			break;
+		case ISCSI_FLASHNODE_ALIAS:
+			rc = iscsi_switch_str_param(&fnode_sess->targetalias,
+						    (char *)fnode_param->value);
+			break;
+		case ISCSI_FLASHNODE_REDIRECT_IPADDR:
+			memcpy(fnode_conn->redirect_ipaddr, fnode_param->value,
+			       IPv6_ADDR_LEN);
+			break;
+		case ISCSI_FLASHNODE_MAX_SEGMENT_SIZE:
+			fnode_conn->max_segment_size =
+					*(unsigned *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_LOCAL_PORT:
+			fnode_conn->local_port =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_IPV4_TOS:
+			fnode_conn->ipv4_tos = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IPV6_TC:
+			fnode_conn->ipv6_traffic_class = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_IPV6_FLOW_LABEL:
+			fnode_conn->ipv6_flow_label = fnode_param->value[0];
+			break;
+		case ISCSI_FLASHNODE_NAME:
+			rc = iscsi_switch_str_param(&fnode_sess->targetname,
+						    (char *)fnode_param->value);
+			break;
+		case ISCSI_FLASHNODE_TPGT:
+			fnode_sess->tpgt = *(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_LINK_LOCAL_IPV6:
+			memcpy(fnode_conn->link_local_ipv6_addr,
+			       fnode_param->value, IPv6_ADDR_LEN);
+			break;
+		case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
+			fnode_sess->discovery_parent_type =
+						*(uint16_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_TCP_XMIT_WSF:
+			fnode_conn->tcp_xmit_wsf =
+						*(uint8_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_TCP_RECV_WSF:
+			fnode_conn->tcp_recv_wsf =
+						*(uint8_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_STATSN:
+			fnode_conn->statsn = *(uint32_t *)fnode_param->value;
+			break;
+		case ISCSI_FLASHNODE_EXP_STATSN:
+			fnode_conn->exp_statsn =
+						*(uint32_t *)fnode_param->value;
+			break;
+		default:
+			ql4_printk(KERN_ERR, ha,
+				   "%s: No such sysfs attribute\n", __func__);
+			rc = -ENOSYS;
+			goto exit_set_param;
+		}
+	}
+
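+	/* Apply the updated attributes to the flash DDB entry */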
+	rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn);
+
+exit_set_param:
+	kfree(fw_ddb_entry);
+	return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_delete - Delete firmware DDB entry
+ * @fnode_sess: pointer to session attrs of flash ddb entry
+ *
+ * This invalidates the flash ddb entry at the given index
+ **/
+static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess)
+{
+	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);
+	struct scsi_qla_host *ha = to_qla_host(shost);
+	uint32_t dev_db_start_offset;
+	uint32_t dev_db_end_offset;
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint16_t *ddb_cookie = NULL;
+	size_t ddb_size;
+	void *pddb = NULL;
+	int target_id;
+	int rc = 0;
+
+	if (!fnode_sess) {
+		rc = -EINVAL;
+		goto exit_ddb_del;
+	}
+
+	if (fnode_sess->is_boot_target) {
+		rc = -EPERM;
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Deletion of boot target entry is not permitted.\n",
+				  __func__));
+		goto exit_ddb_del;
+	}
+
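+	/* Non-persistent entries exist only in sysfs, so skip the flash
+	 * update and just remove the sysfs node.
+	 */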
+	if (fnode_sess->flash_state == DEV_DB_NON_PERSISTENT)
+		goto sysfs_ddb_del;
+
+	if (is_qla40XX(ha)) {
+		dev_db_start_offset = FLASH_OFFSET_DB_INFO;
+		dev_db_end_offset = FLASH_OFFSET_DB_END;
+		dev_db_start_offset += (fnode_sess->target_id *
+				       sizeof(*fw_ddb_entry));
+		ddb_size = sizeof(*fw_ddb_entry);
+	} else {
+		dev_db_start_offset = FLASH_RAW_ACCESS_ADDR +
+				      (ha->hw.flt_region_ddb << 2);
+		/* flt_ddb_size is DDB table size for both ports
+		 * so divide it by 2 to calculate the offset for second port
+		 */
+		if (ha->port_num == 1)
+			dev_db_start_offset += (ha->hw.flt_ddb_size / 2);
+
+		dev_db_end_offset = dev_db_start_offset +
+				    (ha->hw.flt_ddb_size / 2);
+
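+		/* Advance to this target's DDB entry and then to its cookie
+		 * field so that only the cookie word is rewritten.
+		 */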
+		dev_db_start_offset += (fnode_sess->target_id *
+				       sizeof(*fw_ddb_entry));
+		dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) -
+				       (void *)fw_ddb_entry;
+
+		ddb_size = sizeof(*ddb_cookie);
+	}
+
+	DEBUG2(ql4_printk(KERN_ERR, ha, "%s: start offset=%u, end offset=%u\n",
+			  __func__, dev_db_start_offset, dev_db_end_offset));
+
+	if (dev_db_start_offset > dev_db_end_offset) {
+		rc = -EIO;
+		DEBUG2(ql4_printk(KERN_ERR, ha, "%s:Invalid DDB index %u\n",
+				  __func__, fnode_sess->target_id));
+		goto exit_ddb_del;
+	}
+
+	pddb = dma_alloc_coherent(&ha->pdev->dev, ddb_size,
+				  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!pddb) {
+		rc = -ENOMEM;
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		goto exit_ddb_del;
+	}
+
+	if (is_qla40XX(ha)) {
+		fw_ddb_entry = pddb;
+		memset(fw_ddb_entry, 0, ddb_size);
+		ddb_cookie = &fw_ddb_entry->cookie;
+	} else {
+		ddb_cookie = pddb;
+	}
+
+	/* invalidate the cookie */
+	*ddb_cookie = 0xFFEE;
+	qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
+			  ddb_size, FLASH_OPT_RMW_COMMIT);
+
+sysfs_ddb_del:
+	target_id = fnode_sess->target_id;
+	iscsi_destroy_flashnode_sess(fnode_sess);
+	ql4_printk(KERN_INFO, ha,
+		   "%s: session and conn entries for flashnode %u of host %lu deleted\n",
+		   __func__, target_id, ha->host_no);
+exit_ddb_del:
+	if (pddb)
+		dma_free_coherent(&ha->pdev->dev, ddb_size, pddb,
+				  fw_ddb_entry_dma);
+	return rc;
+}
+
+/**
+ * qla4xxx_sysfs_ddb_export - Create sysfs entries for firmware DDBs
+ * @ha: pointer to adapter structure
+ *
+ * Export the firmware DDB for all send targets and normal targets to sysfs.
+ **/
+static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
+{
+	struct dev_db_entry *fw_ddb_entry = NULL;
+	dma_addr_t fw_ddb_entry_dma;
+	uint16_t max_ddbs;
+	uint16_t idx = 0;
+	int ret = QLA_SUCCESS;
+
+	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
+					  sizeof(*fw_ddb_entry),
+					  &fw_ddb_entry_dma, GFP_KERNEL);
+	if (!fw_ddb_entry) {
+		DEBUG2(ql4_printk(KERN_ERR, ha,
+				  "%s: Unable to allocate dma buffer\n",
+				  __func__));
+		return -ENOMEM;
+	}
+
+	max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+				     MAX_DEV_DB_ENTRIES;
+
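+	/* Create a sysfs flashnode entry for each valid DDB found in flash */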
+	for (idx = 0; idx < max_ddbs; idx++) {
+		if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma,
+					     idx))
+			continue;
+
+		ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, &idx, 0);
+		if (ret) {
+			ret = -EIO;
+			break;
+		}
+	}
+
+	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
+			  fw_ddb_entry_dma);
+
+	return ret;
+}
+
+static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha)
+{
+	iscsi_destroy_all_flashnode(ha->host);
+}
+
 /**
  * qla4xxx_build_ddb_list - Build ddb list and setup sessions
  * @ha: pointer to adapter structure
@@ -5341,8 +7016,11 @@
 	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
 
 	/* Dont retry adapter initialization if IRQ allocation failed */
-	if (!test_bit(AF_IRQ_ATTACHED, &ha->flags))
+	if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n",
+			   __func__);
 		goto skip_retry_init;
+	}
 
 	while ((!test_bit(AF_ONLINE, &ha->flags)) &&
 	    init_retry_count++ < MAX_INIT_RETRIES) {
@@ -5445,6 +7123,10 @@
 		ql4_printk(KERN_ERR, ha,
 			   "%s: No iSCSI boot target configured\n", __func__);
 
+	if (qla4xxx_sysfs_ddb_export(ha))
+		ql4_printk(KERN_ERR, ha,
+			   "%s: Error exporting ddb to sysfs\n", __func__);
+
 		/* Perform the build ddb list and login to each */
 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
 	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
@@ -5570,6 +7252,7 @@
 	qla4xxx_destroy_fw_ddb_session(ha);
 	qla4_8xxx_free_sysfs_attr(ha);
 
+	qla4xxx_sysfs_ddb_remove(ha);
 	scsi_remove_host(ha->host);
 
 	qla4xxx_free_adapter(ha);
@@ -5669,7 +7352,6 @@
 
 	/* update counters */
 	if (srb->flags & SRB_DMA_VALID) {
-		ha->req_q_count += srb->iocb_cnt;
 		ha->iocb_cnt -= srb->iocb_cnt;
 		if (srb->cmd)
 			srb->cmd->host_scribble =
@@ -6081,6 +7763,7 @@
 {
 	struct scsi_qla_host *ha = to_qla_host(shost);
 	int rval = QLA_SUCCESS;
+	uint32_t idc_ctrl;
 
 	if (ql4xdontresethba) {
 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
@@ -6111,6 +7794,14 @@
 	}
 
 recover_adapter:
+	/* For ISP83XX set graceful reset bit in IDC_DRV_CTRL if
+	 * reset is issued by application */
+	if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
+		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
+				 (idc_ctrl | GRACEFUL_RESET_BIT1));
+	}
+
 	rval = qla4xxx_recover_adapter(ha);
 	if (rval != QLA_SUCCESS) {
 		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n",
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 6775a45..83e0fec 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.03.00-k4"
+#define QLA4XXX_DRIVER_VERSION	"5.03.00-k8"
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a74b97..ce06e87 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -25,6 +25,7 @@
 #include <linux/slab.h>
 #include <linux/bsg-lib.h>
 #include <linux/idr.h>
+#include <linux/list.h>
 #include <net/tcp.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -460,6 +461,689 @@
 EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
 
 /*
+ * Interface to display flash node params to sysfs
+ */
+
+#define ISCSI_FLASHNODE_ATTR(_prefix, _name, _mode, _show, _store)	\
+struct device_attribute dev_attr_##_prefix##_##_name =			\
+	__ATTR(_name, _mode, _show, _store)
+
+/* flash node session attrs show */
+#define iscsi_flashnode_sess_attr_show(type, name, param)		\
+static ssize_t								\
+show_##type##_##name(struct device *dev, struct device_attribute *attr,	\
+		     char *buf)						\
+{									\
+	struct iscsi_bus_flash_session *fnode_sess =			\
+					iscsi_dev_to_flash_session(dev);\
+	struct iscsi_transport *t = fnode_sess->transport;		\
+	return t->get_flashnode_param(fnode_sess, param, buf);		\
+}									\
+
+
+#define iscsi_flashnode_sess_attr(type, name, param)			\
+	iscsi_flashnode_sess_attr_show(type, name, param)		\
+static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO,			\
+			    show_##type##_##name, NULL);
+
+/* Flash node session attributes */
+
+iscsi_flashnode_sess_attr(fnode, auto_snd_tgt_disable,
+			  ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE);
+iscsi_flashnode_sess_attr(fnode, discovery_session,
+			  ISCSI_FLASHNODE_DISCOVERY_SESS);
+iscsi_flashnode_sess_attr(fnode, portal_type, ISCSI_FLASHNODE_PORTAL_TYPE);
+iscsi_flashnode_sess_attr(fnode, entry_enable, ISCSI_FLASHNODE_ENTRY_EN);
+iscsi_flashnode_sess_attr(fnode, immediate_data, ISCSI_FLASHNODE_IMM_DATA_EN);
+iscsi_flashnode_sess_attr(fnode, initial_r2t, ISCSI_FLASHNODE_INITIAL_R2T_EN);
+iscsi_flashnode_sess_attr(fnode, data_seq_in_order,
+			  ISCSI_FLASHNODE_DATASEQ_INORDER);
+iscsi_flashnode_sess_attr(fnode, data_pdu_in_order,
+			  ISCSI_FLASHNODE_PDU_INORDER);
+iscsi_flashnode_sess_attr(fnode, chap_auth, ISCSI_FLASHNODE_CHAP_AUTH_EN);
+iscsi_flashnode_sess_attr(fnode, discovery_logout,
+			  ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN);
+iscsi_flashnode_sess_attr(fnode, bidi_chap, ISCSI_FLASHNODE_BIDI_CHAP_EN);
+iscsi_flashnode_sess_attr(fnode, discovery_auth_optional,
+			  ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL);
+iscsi_flashnode_sess_attr(fnode, erl, ISCSI_FLASHNODE_ERL);
+iscsi_flashnode_sess_attr(fnode, first_burst_len, ISCSI_FLASHNODE_FIRST_BURST);
+iscsi_flashnode_sess_attr(fnode, def_time2wait, ISCSI_FLASHNODE_DEF_TIME2WAIT);
+iscsi_flashnode_sess_attr(fnode, def_time2retain,
+			  ISCSI_FLASHNODE_DEF_TIME2RETAIN);
+iscsi_flashnode_sess_attr(fnode, max_outstanding_r2t, ISCSI_FLASHNODE_MAX_R2T);
+iscsi_flashnode_sess_attr(fnode, isid, ISCSI_FLASHNODE_ISID);
+iscsi_flashnode_sess_attr(fnode, tsid, ISCSI_FLASHNODE_TSID);
+iscsi_flashnode_sess_attr(fnode, max_burst_len, ISCSI_FLASHNODE_MAX_BURST);
+iscsi_flashnode_sess_attr(fnode, def_taskmgmt_tmo,
+			  ISCSI_FLASHNODE_DEF_TASKMGMT_TMO);
+iscsi_flashnode_sess_attr(fnode, targetalias, ISCSI_FLASHNODE_ALIAS);
+iscsi_flashnode_sess_attr(fnode, targetname, ISCSI_FLASHNODE_NAME);
+iscsi_flashnode_sess_attr(fnode, tpgt, ISCSI_FLASHNODE_TPGT);
+iscsi_flashnode_sess_attr(fnode, discovery_parent_idx,
+			  ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX);
+iscsi_flashnode_sess_attr(fnode, discovery_parent_type,
+			  ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE);
+iscsi_flashnode_sess_attr(fnode, chap_in_idx, ISCSI_FLASHNODE_CHAP_IN_IDX);
+iscsi_flashnode_sess_attr(fnode, chap_out_idx, ISCSI_FLASHNODE_CHAP_OUT_IDX);
+iscsi_flashnode_sess_attr(fnode, username, ISCSI_FLASHNODE_USERNAME);
+iscsi_flashnode_sess_attr(fnode, username_in, ISCSI_FLASHNODE_USERNAME_IN);
+iscsi_flashnode_sess_attr(fnode, password, ISCSI_FLASHNODE_PASSWORD);
+iscsi_flashnode_sess_attr(fnode, password_in, ISCSI_FLASHNODE_PASSWORD_IN);
+iscsi_flashnode_sess_attr(fnode, is_boot_target, ISCSI_FLASHNODE_IS_BOOT_TGT);
+
+static struct attribute *iscsi_flashnode_sess_attrs[] = {
+	&dev_attr_fnode_auto_snd_tgt_disable.attr,
+	&dev_attr_fnode_discovery_session.attr,
+	&dev_attr_fnode_portal_type.attr,
+	&dev_attr_fnode_entry_enable.attr,
+	&dev_attr_fnode_immediate_data.attr,
+	&dev_attr_fnode_initial_r2t.attr,
+	&dev_attr_fnode_data_seq_in_order.attr,
+	&dev_attr_fnode_data_pdu_in_order.attr,
+	&dev_attr_fnode_chap_auth.attr,
+	&dev_attr_fnode_discovery_logout.attr,
+	&dev_attr_fnode_bidi_chap.attr,
+	&dev_attr_fnode_discovery_auth_optional.attr,
+	&dev_attr_fnode_erl.attr,
+	&dev_attr_fnode_first_burst_len.attr,
+	&dev_attr_fnode_def_time2wait.attr,
+	&dev_attr_fnode_def_time2retain.attr,
+	&dev_attr_fnode_max_outstanding_r2t.attr,
+	&dev_attr_fnode_isid.attr,
+	&dev_attr_fnode_tsid.attr,
+	&dev_attr_fnode_max_burst_len.attr,
+	&dev_attr_fnode_def_taskmgmt_tmo.attr,
+	&dev_attr_fnode_targetalias.attr,
+	&dev_attr_fnode_targetname.attr,
+	&dev_attr_fnode_tpgt.attr,
+	&dev_attr_fnode_discovery_parent_idx.attr,
+	&dev_attr_fnode_discovery_parent_type.attr,
+	&dev_attr_fnode_chap_in_idx.attr,
+	&dev_attr_fnode_chap_out_idx.attr,
+	&dev_attr_fnode_username.attr,
+	&dev_attr_fnode_username_in.attr,
+	&dev_attr_fnode_password.attr,
+	&dev_attr_fnode_password_in.attr,
+	&dev_attr_fnode_is_boot_target.attr,
+	NULL,
+};
+
+static umode_t iscsi_flashnode_sess_attr_is_visible(struct kobject *kobj,
+						    struct attribute *attr,
+						    int i)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct iscsi_bus_flash_session *fnode_sess =
+						iscsi_dev_to_flash_session(dev);
+	struct iscsi_transport *t = fnode_sess->transport;
+	int param;
+
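+	/* Map the sysfs attribute back to its ISCSI_FLASHNODE_* parameter so
+	 * the transport can decide whether it should be visible.
+	 */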
+	if (attr == &dev_attr_fnode_auto_snd_tgt_disable.attr) {
+		param = ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE;
+	} else if (attr == &dev_attr_fnode_discovery_session.attr) {
+		param = ISCSI_FLASHNODE_DISCOVERY_SESS;
+	} else if (attr == &dev_attr_fnode_portal_type.attr) {
+		param = ISCSI_FLASHNODE_PORTAL_TYPE;
+	} else if (attr == &dev_attr_fnode_entry_enable.attr) {
+		param = ISCSI_FLASHNODE_ENTRY_EN;
+	} else if (attr == &dev_attr_fnode_immediate_data.attr) {
+		param = ISCSI_FLASHNODE_IMM_DATA_EN;
+	} else if (attr == &dev_attr_fnode_initial_r2t.attr) {
+		param = ISCSI_FLASHNODE_INITIAL_R2T_EN;
+	} else if (attr == &dev_attr_fnode_data_seq_in_order.attr) {
+		param = ISCSI_FLASHNODE_DATASEQ_INORDER;
+	} else if (attr == &dev_attr_fnode_data_pdu_in_order.attr) {
+		param = ISCSI_FLASHNODE_PDU_INORDER;
+	} else if (attr == &dev_attr_fnode_chap_auth.attr) {
+		param = ISCSI_FLASHNODE_CHAP_AUTH_EN;
+	} else if (attr == &dev_attr_fnode_discovery_logout.attr) {
+		param = ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN;
+	} else if (attr == &dev_attr_fnode_bidi_chap.attr) {
+		param = ISCSI_FLASHNODE_BIDI_CHAP_EN;
+	} else if (attr == &dev_attr_fnode_discovery_auth_optional.attr) {
+		param = ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL;
+	} else if (attr == &dev_attr_fnode_erl.attr) {
+		param = ISCSI_FLASHNODE_ERL;
+	} else if (attr == &dev_attr_fnode_first_burst_len.attr) {
+		param = ISCSI_FLASHNODE_FIRST_BURST;
+	} else if (attr == &dev_attr_fnode_def_time2wait.attr) {
+		param = ISCSI_FLASHNODE_DEF_TIME2WAIT;
+	} else if (attr == &dev_attr_fnode_def_time2retain.attr) {
+		param = ISCSI_FLASHNODE_DEF_TIME2RETAIN;
+	} else if (attr == &dev_attr_fnode_max_outstanding_r2t.attr) {
+		param = ISCSI_FLASHNODE_MAX_R2T;
+	} else if (attr == &dev_attr_fnode_isid.attr) {
+		param = ISCSI_FLASHNODE_ISID;
+	} else if (attr == &dev_attr_fnode_tsid.attr) {
+		param = ISCSI_FLASHNODE_TSID;
+	} else if (attr == &dev_attr_fnode_max_burst_len.attr) {
+		param = ISCSI_FLASHNODE_MAX_BURST;
+	} else if (attr == &dev_attr_fnode_def_taskmgmt_tmo.attr) {
+		param = ISCSI_FLASHNODE_DEF_TASKMGMT_TMO;
+	} else if (attr == &dev_attr_fnode_targetalias.attr) {
+		param = ISCSI_FLASHNODE_ALIAS;
+	} else if (attr == &dev_attr_fnode_targetname.attr) {
+		param = ISCSI_FLASHNODE_NAME;
+	} else if (attr == &dev_attr_fnode_tpgt.attr) {
+		param = ISCSI_FLASHNODE_TPGT;
+	} else if (attr == &dev_attr_fnode_discovery_parent_idx.attr) {
+		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX;
+	} else if (attr == &dev_attr_fnode_discovery_parent_type.attr) {
+		param = ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE;
+	} else if (attr == &dev_attr_fnode_chap_in_idx.attr) {
+		param = ISCSI_FLASHNODE_CHAP_IN_IDX;
+	} else if (attr == &dev_attr_fnode_chap_out_idx.attr) {
+		param = ISCSI_FLASHNODE_CHAP_OUT_IDX;
+	} else if (attr == &dev_attr_fnode_username.attr) {
+		param = ISCSI_FLASHNODE_USERNAME;
+	} else if (attr == &dev_attr_fnode_username_in.attr) {
+		param = ISCSI_FLASHNODE_USERNAME_IN;
+	} else if (attr == &dev_attr_fnode_password.attr) {
+		param = ISCSI_FLASHNODE_PASSWORD;
+	} else if (attr == &dev_attr_fnode_password_in.attr) {
+		param = ISCSI_FLASHNODE_PASSWORD_IN;
+	} else if (attr == &dev_attr_fnode_is_boot_target.attr) {
+		param = ISCSI_FLASHNODE_IS_BOOT_TGT;
+	} else {
+		WARN_ONCE(1, "Invalid flashnode session attr");
+		return 0;
+	}
+
+	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
+}
+
+static struct attribute_group iscsi_flashnode_sess_attr_group = {
+	.attrs = iscsi_flashnode_sess_attrs,
+	.is_visible = iscsi_flashnode_sess_attr_is_visible,
+};
+
+static const struct attribute_group *iscsi_flashnode_sess_attr_groups[] = {
+	&iscsi_flashnode_sess_attr_group,
+	NULL,
+};
+
+static void iscsi_flashnode_sess_release(struct device *dev)
+{
+	struct iscsi_bus_flash_session *fnode_sess =
+						iscsi_dev_to_flash_session(dev);
+
+	kfree(fnode_sess->targetname);
+	kfree(fnode_sess->targetalias);
+	kfree(fnode_sess->portal_type);
+	kfree(fnode_sess);
+}
+
+struct device_type iscsi_flashnode_sess_dev_type = {
+	.name = "iscsi_flashnode_sess_dev_type",
+	.groups = iscsi_flashnode_sess_attr_groups,
+	.release = iscsi_flashnode_sess_release,
+};
+
+/* flash node connection attrs show */
+#define iscsi_flashnode_conn_attr_show(type, name, param)		\
+static ssize_t								\
+show_##type##_##name(struct device *dev, struct device_attribute *attr,	\
+		     char *buf)						\
+{									\
+	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);\
+	struct iscsi_bus_flash_session *fnode_sess =			\
+				iscsi_flash_conn_to_flash_session(fnode_conn);\
+	struct iscsi_transport *t = fnode_conn->transport;		\
+	return t->get_flashnode_param(fnode_sess, param, buf);		\
+}									\
+
+
+#define iscsi_flashnode_conn_attr(type, name, param)			\
+	iscsi_flashnode_conn_attr_show(type, name, param)		\
+static ISCSI_FLASHNODE_ATTR(type, name, S_IRUGO,			\
+			    show_##type##_##name, NULL);
+
+/* Flash node connection attributes */
+
+iscsi_flashnode_conn_attr(fnode, is_fw_assigned_ipv6,
+			  ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6);
+iscsi_flashnode_conn_attr(fnode, header_digest, ISCSI_FLASHNODE_HDR_DGST_EN);
+iscsi_flashnode_conn_attr(fnode, data_digest, ISCSI_FLASHNODE_DATA_DGST_EN);
+iscsi_flashnode_conn_attr(fnode, snack_req, ISCSI_FLASHNODE_SNACK_REQ_EN);
+iscsi_flashnode_conn_attr(fnode, tcp_timestamp_stat,
+			  ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT);
+iscsi_flashnode_conn_attr(fnode, tcp_nagle_disable,
+			  ISCSI_FLASHNODE_TCP_NAGLE_DISABLE);
+iscsi_flashnode_conn_attr(fnode, tcp_wsf_disable,
+			  ISCSI_FLASHNODE_TCP_WSF_DISABLE);
+iscsi_flashnode_conn_attr(fnode, tcp_timer_scale,
+			  ISCSI_FLASHNODE_TCP_TIMER_SCALE);
+iscsi_flashnode_conn_attr(fnode, tcp_timestamp_enable,
+			  ISCSI_FLASHNODE_TCP_TIMESTAMP_EN);
+iscsi_flashnode_conn_attr(fnode, fragment_disable,
+			  ISCSI_FLASHNODE_IP_FRAG_DISABLE);
+iscsi_flashnode_conn_attr(fnode, keepalive_tmo, ISCSI_FLASHNODE_KEEPALIVE_TMO);
+iscsi_flashnode_conn_attr(fnode, port, ISCSI_FLASHNODE_PORT);
+iscsi_flashnode_conn_attr(fnode, ipaddress, ISCSI_FLASHNODE_IPADDR);
+iscsi_flashnode_conn_attr(fnode, max_recv_dlength,
+			  ISCSI_FLASHNODE_MAX_RECV_DLENGTH);
+iscsi_flashnode_conn_attr(fnode, max_xmit_dlength,
+			  ISCSI_FLASHNODE_MAX_XMIT_DLENGTH);
+iscsi_flashnode_conn_attr(fnode, local_port, ISCSI_FLASHNODE_LOCAL_PORT);
+iscsi_flashnode_conn_attr(fnode, ipv4_tos, ISCSI_FLASHNODE_IPV4_TOS);
+iscsi_flashnode_conn_attr(fnode, ipv6_traffic_class, ISCSI_FLASHNODE_IPV6_TC);
+iscsi_flashnode_conn_attr(fnode, ipv6_flow_label,
+			  ISCSI_FLASHNODE_IPV6_FLOW_LABEL);
+iscsi_flashnode_conn_attr(fnode, redirect_ipaddr,
+			  ISCSI_FLASHNODE_REDIRECT_IPADDR);
+iscsi_flashnode_conn_attr(fnode, max_segment_size,
+			  ISCSI_FLASHNODE_MAX_SEGMENT_SIZE);
+iscsi_flashnode_conn_attr(fnode, link_local_ipv6,
+			  ISCSI_FLASHNODE_LINK_LOCAL_IPV6);
+iscsi_flashnode_conn_attr(fnode, tcp_xmit_wsf, ISCSI_FLASHNODE_TCP_XMIT_WSF);
+iscsi_flashnode_conn_attr(fnode, tcp_recv_wsf, ISCSI_FLASHNODE_TCP_RECV_WSF);
+iscsi_flashnode_conn_attr(fnode, statsn, ISCSI_FLASHNODE_STATSN);
+iscsi_flashnode_conn_attr(fnode, exp_statsn, ISCSI_FLASHNODE_EXP_STATSN);
+
+static struct attribute *iscsi_flashnode_conn_attrs[] = {
+	&dev_attr_fnode_is_fw_assigned_ipv6.attr,
+	&dev_attr_fnode_header_digest.attr,
+	&dev_attr_fnode_data_digest.attr,
+	&dev_attr_fnode_snack_req.attr,
+	&dev_attr_fnode_tcp_timestamp_stat.attr,
+	&dev_attr_fnode_tcp_nagle_disable.attr,
+	&dev_attr_fnode_tcp_wsf_disable.attr,
+	&dev_attr_fnode_tcp_timer_scale.attr,
+	&dev_attr_fnode_tcp_timestamp_enable.attr,
+	&dev_attr_fnode_fragment_disable.attr,
+	&dev_attr_fnode_max_recv_dlength.attr,
+	&dev_attr_fnode_max_xmit_dlength.attr,
+	&dev_attr_fnode_keepalive_tmo.attr,
+	&dev_attr_fnode_port.attr,
+	&dev_attr_fnode_ipaddress.attr,
+	&dev_attr_fnode_redirect_ipaddr.attr,
+	&dev_attr_fnode_max_segment_size.attr,
+	&dev_attr_fnode_local_port.attr,
+	&dev_attr_fnode_ipv4_tos.attr,
+	&dev_attr_fnode_ipv6_traffic_class.attr,
+	&dev_attr_fnode_ipv6_flow_label.attr,
+	&dev_attr_fnode_link_local_ipv6.attr,
+	&dev_attr_fnode_tcp_xmit_wsf.attr,
+	&dev_attr_fnode_tcp_recv_wsf.attr,
+	&dev_attr_fnode_statsn.attr,
+	&dev_attr_fnode_exp_statsn.attr,
+	NULL,
+};
+
+static umode_t iscsi_flashnode_conn_attr_is_visible(struct kobject *kobj,
+						    struct attribute *attr,
+						    int i)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
+	struct iscsi_transport *t = fnode_conn->transport;
+	int param;
+
+	if (attr == &dev_attr_fnode_is_fw_assigned_ipv6.attr) {
+		param = ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6;
+	} else if (attr == &dev_attr_fnode_header_digest.attr) {
+		param = ISCSI_FLASHNODE_HDR_DGST_EN;
+	} else if (attr == &dev_attr_fnode_data_digest.attr) {
+		param = ISCSI_FLASHNODE_DATA_DGST_EN;
+	} else if (attr == &dev_attr_fnode_snack_req.attr) {
+		param = ISCSI_FLASHNODE_SNACK_REQ_EN;
+	} else if (attr == &dev_attr_fnode_tcp_timestamp_stat.attr) {
+		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT;
+	} else if (attr == &dev_attr_fnode_tcp_nagle_disable.attr) {
+		param = ISCSI_FLASHNODE_TCP_NAGLE_DISABLE;
+	} else if (attr == &dev_attr_fnode_tcp_wsf_disable.attr) {
+		param = ISCSI_FLASHNODE_TCP_WSF_DISABLE;
+	} else if (attr == &dev_attr_fnode_tcp_timer_scale.attr) {
+		param = ISCSI_FLASHNODE_TCP_TIMER_SCALE;
+	} else if (attr == &dev_attr_fnode_tcp_timestamp_enable.attr) {
+		param = ISCSI_FLASHNODE_TCP_TIMESTAMP_EN;
+	} else if (attr == &dev_attr_fnode_fragment_disable.attr) {
+		param = ISCSI_FLASHNODE_IP_FRAG_DISABLE;
+	} else if (attr == &dev_attr_fnode_max_recv_dlength.attr) {
+		param = ISCSI_FLASHNODE_MAX_RECV_DLENGTH;
+	} else if (attr == &dev_attr_fnode_max_xmit_dlength.attr) {
+		param = ISCSI_FLASHNODE_MAX_XMIT_DLENGTH;
+	} else if (attr == &dev_attr_fnode_keepalive_tmo.attr) {
+		param = ISCSI_FLASHNODE_KEEPALIVE_TMO;
+	} else if (attr == &dev_attr_fnode_port.attr) {
+		param = ISCSI_FLASHNODE_PORT;
+	} else if (attr == &dev_attr_fnode_ipaddress.attr) {
+		param = ISCSI_FLASHNODE_IPADDR;
+	} else if (attr == &dev_attr_fnode_redirect_ipaddr.attr) {
+		param = ISCSI_FLASHNODE_REDIRECT_IPADDR;
+	} else if (attr == &dev_attr_fnode_max_segment_size.attr) {
+		param = ISCSI_FLASHNODE_MAX_SEGMENT_SIZE;
+	} else if (attr == &dev_attr_fnode_local_port.attr) {
+		param = ISCSI_FLASHNODE_LOCAL_PORT;
+	} else if (attr == &dev_attr_fnode_ipv4_tos.attr) {
+		param = ISCSI_FLASHNODE_IPV4_TOS;
+	} else if (attr == &dev_attr_fnode_ipv6_traffic_class.attr) {
+		param = ISCSI_FLASHNODE_IPV6_TC;
+	} else if (attr == &dev_attr_fnode_ipv6_flow_label.attr) {
+		param = ISCSI_FLASHNODE_IPV6_FLOW_LABEL;
+	} else if (attr == &dev_attr_fnode_link_local_ipv6.attr) {
+		param = ISCSI_FLASHNODE_LINK_LOCAL_IPV6;
+	} else if (attr == &dev_attr_fnode_tcp_xmit_wsf.attr) {
+		param = ISCSI_FLASHNODE_TCP_XMIT_WSF;
+	} else if (attr == &dev_attr_fnode_tcp_recv_wsf.attr) {
+		param = ISCSI_FLASHNODE_TCP_RECV_WSF;
+	} else if (attr == &dev_attr_fnode_statsn.attr) {
+		param = ISCSI_FLASHNODE_STATSN;
+	} else if (attr == &dev_attr_fnode_exp_statsn.attr) {
+		param = ISCSI_FLASHNODE_EXP_STATSN;
+	} else {
+		WARN_ONCE(1, "Invalid flashnode connection attr");
+		return 0;
+	}
+
+	return t->attr_is_visible(ISCSI_FLASHNODE_PARAM, param);
+}
+
+static struct attribute_group iscsi_flashnode_conn_attr_group = {
+	.attrs = iscsi_flashnode_conn_attrs,
+	.is_visible = iscsi_flashnode_conn_attr_is_visible,
+};
+
+static const struct attribute_group *iscsi_flashnode_conn_attr_groups[] = {
+	&iscsi_flashnode_conn_attr_group,
+	NULL,
+};
+
+static void iscsi_flashnode_conn_release(struct device *dev)
+{
+	struct iscsi_bus_flash_conn *fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+	kfree(fnode_conn->ipaddress);
+	kfree(fnode_conn->redirect_ipaddr);
+	kfree(fnode_conn->link_local_ipv6_addr);
+	kfree(fnode_conn);
+}
+
+struct device_type iscsi_flashnode_conn_dev_type = {
+	.name = "iscsi_flashnode_conn_dev_type",
+	.groups = iscsi_flashnode_conn_attr_groups,
+	.release = iscsi_flashnode_conn_release,
+};
+
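+/*
+ * All flashnode session and connection devices are registered on this bus
+ * so that they can be matched and torn down as a group.
+ */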
+struct bus_type iscsi_flashnode_bus;
+
+int iscsi_flashnode_bus_match(struct device *dev,
+				     struct device_driver *drv)
+{
+	if (dev->bus == &iscsi_flashnode_bus)
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
+
+struct bus_type iscsi_flashnode_bus = {
+	.name = "iscsi_flashnode",
+	.match = &iscsi_flashnode_bus_match,
+};
+
+/**
+ * iscsi_create_flashnode_sess - Add flashnode session entry in sysfs
+ * @shost: pointer to host data
+ * @index: index of flashnode to add in sysfs
+ * @transport: pointer to transport data
+ * @dd_size: size of driver private data to allocate with the session entry
+ *
+ * Adds a sysfs entry for the flashnode session attributes
+ *
+ * Returns:
+ *  pointer to allocated flashnode sess on success
+ *  %NULL on failure
+ */
+struct iscsi_bus_flash_session *
+iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
+			    struct iscsi_transport *transport,
+			    int dd_size)
+{
+	struct iscsi_bus_flash_session *fnode_sess;
+	int err;
+
+	fnode_sess = kzalloc(sizeof(*fnode_sess) + dd_size, GFP_KERNEL);
+	if (!fnode_sess)
+		return NULL;
+
+	fnode_sess->transport = transport;
+	fnode_sess->target_id = index;
+	fnode_sess->dev.type = &iscsi_flashnode_sess_dev_type;
+	fnode_sess->dev.bus = &iscsi_flashnode_bus;
+	fnode_sess->dev.parent = &shost->shost_gendev;
+	dev_set_name(&fnode_sess->dev, "flashnode_sess-%u:%u",
+		     shost->host_no, index);
+
+	err = device_register(&fnode_sess->dev);
+	if (err)
+		goto free_fnode_sess;
+
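+	/* Driver private data lives immediately after the session structure
+	 * in the same allocation.
+	 */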
+	if (dd_size)
+		fnode_sess->dd_data = &fnode_sess[1];
+
+	return fnode_sess;
+
+free_fnode_sess:
+	kfree(fnode_sess);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess);
+
+/**
+ * iscsi_create_flashnode_conn - Add flashnode conn entry in sysfs
+ * @shost: pointer to host data
+ * @fnode_sess: pointer to the parent flashnode session entry
+ * @transport: pointer to transport data
+ * @dd_size: size of driver private data to allocate with the connection entry
+ *
+ * Adds a sysfs entry for the flashnode connection attributes
+ *
+ * Returns:
+ *  pointer to allocated flashnode conn on success
+ *  %NULL on failure
+ */
+struct iscsi_bus_flash_conn *
+iscsi_create_flashnode_conn(struct Scsi_Host *shost,
+			    struct iscsi_bus_flash_session *fnode_sess,
+			    struct iscsi_transport *transport,
+			    int dd_size)
+{
+	struct iscsi_bus_flash_conn *fnode_conn;
+	int err;
+
+	fnode_conn = kzalloc(sizeof(*fnode_conn) + dd_size, GFP_KERNEL);
+	if (!fnode_conn)
+		return NULL;
+
+	fnode_conn->transport = transport;
+	fnode_conn->dev.type = &iscsi_flashnode_conn_dev_type;
+	fnode_conn->dev.bus = &iscsi_flashnode_bus;
+	fnode_conn->dev.parent = &fnode_sess->dev;
+	dev_set_name(&fnode_conn->dev, "flashnode_conn-%u:%u:0",
+		     shost->host_no, fnode_sess->target_id);
+
+	err = device_register(&fnode_conn->dev);
+	if (err)
+		goto free_fnode_conn;
+
+	if (dd_size)
+		fnode_conn->dd_data = &fnode_conn[1];
+
+	return fnode_conn;
+
+free_fnode_conn:
+	kfree(fnode_conn);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
+
+/**
+ * iscsi_is_flashnode_conn_dev - verify passed device is a flashnode conn
+ * @dev: device to verify
+ * @data: pointer to data containing value to use for verification
+ *
+ * Verifies whether the passed device is a flashnode conn device
+ *
+ * Returns:
+ *  1 on success
+ *  0 on failure
+ */
+int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+{
+	return dev->bus == &iscsi_flashnode_bus;
+}
+EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev);
+
+static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
+{
+	device_unregister(&fnode_conn->dev);
+	return 0;
+}
+
+static int flashnode_match_index(struct device *dev, void *data)
+{
+	struct iscsi_bus_flash_session *fnode_sess = NULL;
+	int ret = 0;
+
+	if (!iscsi_flashnode_bus_match(dev, NULL))
+		goto exit_match_index;
+
+	fnode_sess = iscsi_dev_to_flash_session(dev);
+	ret = (fnode_sess->target_id == *((int *)data)) ? 1 : 0;
+
+exit_match_index:
+	return ret;
+}
+
+/**
+ * iscsi_get_flashnode_by_index - finds flashnode session entry by index
+ * @shost: pointer to host data
+ * @data: pointer to data containing value to use for comparison
+ * @fn: function pointer that does actual comparison
+ *
+ * Finds the flashnode session object for the passed index
+ *
+ * Returns:
+ *  pointer to found flashnode session object on success
+ *  %NULL on failure
+ */
+static struct iscsi_bus_flash_session *
+iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data,
+			     int (*fn)(struct device *dev, void *data))
+{
+	struct iscsi_bus_flash_session *fnode_sess = NULL;
+	struct device *dev;
+
+	dev = device_find_child(&shost->shost_gendev, data, fn);
+	if (dev)
+		fnode_sess = iscsi_dev_to_flash_session(dev);
+
+	return fnode_sess;
+}
+
+/**
+ * iscsi_find_flashnode_sess - finds flashnode session entry
+ * @shost: pointer to host data
+ * @data: pointer to data containing value to use for comparison
+ * @fn: function pointer that does actual comparison
+ *
+ * Finds the flashnode session object by comparing the passed data using the
+ * logic defined in the passed function pointer
+ *
+ * Returns:
+ *  pointer to found flashnode session device object on success
+ *  %NULL on failure
+ */
+struct device *
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
+			  int (*fn)(struct device *dev, void *data))
+{
+	struct device *dev;
+
+	dev = device_find_child(&shost->shost_gendev, data, fn);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess);
+
+/**
+ * iscsi_find_flashnode_conn - finds flashnode connection entry
+ * @fnode_sess: pointer to parent flashnode session entry
+ * @data: pointer to data containing value to use for comparison
+ * @fn: function pointer that does actual comparison
+ *
+ * Finds the flashnode connection object by comparing the passed data using
+ * the logic defined in the passed function pointer
+ *
+ * Returns:
+ *  pointer to found flashnode connection device object on success
+ *  %NULL on failure
+ */
+struct device *
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
+			  void *data,
+			  int (*fn)(struct device *dev, void *data))
+{
+	struct device *dev;
+
+	dev = device_find_child(&fnode_sess->dev, data, fn);
+	return dev;
+}
+EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn);
+
+static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)
+{
+	if (!iscsi_is_flashnode_conn_dev(dev, NULL))
+		return 0;
+
+	return iscsi_destroy_flashnode_conn(iscsi_dev_to_flash_conn(dev));
+}
+
+/**
+ * iscsi_destroy_flashnode_sess - destroy flashnode session entry
+ * @fnode_sess: pointer to flashnode session entry to be destroyed
+ *
+ * Deletes the flashnode session entry and all of its child flashnode
+ * connection entries from sysfs
+ */
+void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess)
+{
+	int err;
+
+	err = device_for_each_child(&fnode_sess->dev, NULL,
+				    iscsi_iter_destroy_flashnode_conn_fn);
+	if (err)
+		pr_err("Could not delete all connections for %s. Error %d.\n",
+		       fnode_sess->dev.kobj.name, err);
+
+	device_unregister(&fnode_sess->dev);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_flashnode_sess);
+
+static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)
+{
+	if (!iscsi_flashnode_bus_match(dev, NULL))
+		return 0;
+
+	iscsi_destroy_flashnode_sess(iscsi_dev_to_flash_session(dev));
+	return 0;
+}
+
+/**
+ * iscsi_destroy_all_flashnode - destroy all flashnode session entries
+ * @shost: pointer to host data
+ *
+ * Destroys all the flashnode session entries and all of their child
+ * flashnode connection entries from sysfs
+ */
+void iscsi_destroy_all_flashnode(struct Scsi_Host *shost)
+{
+	device_for_each_child(&shost->shost_gendev, NULL,
+			      iscsi_iter_destroy_flashnode_fn);
+}
+EXPORT_SYMBOL_GPL(iscsi_destroy_all_flashnode);
+
+/*
  * BSG support
  */
 /**
@@ -2092,6 +2776,294 @@
 	return err;
 }
 
+static const struct {
+	enum iscsi_discovery_parent_type value;
+	char				*name;
+} iscsi_discovery_parent_names[] = {
+	{ISCSI_DISC_PARENT_UNKNOWN,	"Unknown" },
+	{ISCSI_DISC_PARENT_SENDTGT,	"Sendtarget" },
+	{ISCSI_DISC_PARENT_ISNS,	"isns" },
+};
+
+char *iscsi_get_discovery_parent_name(int parent_type)
+{
+	int i;
+	char *state = "Unknown!";
+
+	for (i = 0; i < ARRAY_SIZE(iscsi_discovery_parent_names); i++) {
+		if (iscsi_discovery_parent_names[i].value & parent_type) {
+			state = iscsi_discovery_parent_names[i].name;
+			break;
+		}
+	}
+	return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_discovery_parent_name);
+
+static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
+				     struct iscsi_uevent *ev, uint32_t len)
+{
+	char *data = (char *)ev + sizeof(*ev);
+	struct Scsi_Host *shost;
+	struct iscsi_bus_flash_session *fnode_sess;
+	struct iscsi_bus_flash_conn *fnode_conn;
+	struct device *dev;
+	uint32_t *idx;
+	int err = 0;
+
+	if (!transport->set_flashnode_param) {
+		err = -ENOSYS;
+		goto exit_set_fnode;
+	}
+
+	shost = scsi_host_lookup(ev->u.set_flashnode.host_no);
+	if (!shost) {
+		pr_err("%s could not find host no %u\n",
+		       __func__, ev->u.set_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	idx = &ev->u.set_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+						  flashnode_match_index);
+	if (!fnode_sess) {
+		pr_err("%s could not find flashnode %u for host no %u\n",
+		       __func__, *idx, ev->u.set_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+					iscsi_is_flashnode_conn_dev);
+	if (!dev) {
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	fnode_conn = iscsi_dev_to_flash_conn(dev);
+	err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len);
+
+put_host:
+	scsi_host_put(shost);
+
+exit_set_fnode:
+	return err;
+}
+
+static int iscsi_new_flashnode(struct iscsi_transport *transport,
+			       struct iscsi_uevent *ev, uint32_t len)
+{
+	char *data = (char *)ev + sizeof(*ev);
+	struct Scsi_Host *shost;
+	int index;
+	int err = 0;
+
+	if (!transport->new_flashnode) {
+		err = -ENOSYS;
+		goto exit_new_fnode;
+	}
+
+	shost = scsi_host_lookup(ev->u.new_flashnode.host_no);
+	if (!shost) {
+		pr_err("%s could not find host no %u\n",
+		       __func__, ev->u.new_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	index = transport->new_flashnode(shost, data, len);
+
+	if (index >= 0)
+		ev->r.new_flashnode_ret.flashnode_idx = index;
+	else
+		err = -EIO;
+
+put_host:
+	scsi_host_put(shost);
+
+exit_new_fnode:
+	return err;
+}
+
+static int iscsi_del_flashnode(struct iscsi_transport *transport,
+			       struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_bus_flash_session *fnode_sess;
+	uint32_t *idx;
+	int err = 0;
+
+	if (!transport->del_flashnode) {
+		err = -ENOSYS;
+		goto exit_del_fnode;
+	}
+
+	shost = scsi_host_lookup(ev->u.del_flashnode.host_no);
+	if (!shost) {
+		pr_err("%s could not find host no %u\n",
+		       __func__, ev->u.del_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	idx = &ev->u.del_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+						  flashnode_match_index);
+	if (!fnode_sess) {
+		pr_err("%s could not find flashnode %u for host no %u\n",
+		       __func__, *idx, ev->u.del_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	err = transport->del_flashnode(fnode_sess);
+
+put_host:
+	scsi_host_put(shost);
+
+exit_del_fnode:
+	return err;
+}
+
+static int iscsi_login_flashnode(struct iscsi_transport *transport,
+				 struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_bus_flash_session *fnode_sess;
+	struct iscsi_bus_flash_conn *fnode_conn;
+	struct device *dev;
+	uint32_t *idx;
+	int err = 0;
+
+	if (!transport->login_flashnode) {
+		err = -ENOSYS;
+		goto exit_login_fnode;
+	}
+
+	shost = scsi_host_lookup(ev->u.login_flashnode.host_no);
+	if (!shost) {
+		pr_err("%s could not find host no %u\n",
+		       __func__, ev->u.login_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	idx = &ev->u.login_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+						  flashnode_match_index);
+	if (!fnode_sess) {
+		pr_err("%s could not find flashnode %u for host no %u\n",
+		       __func__, *idx, ev->u.login_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+					iscsi_is_flashnode_conn_dev);
+	if (!dev) {
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	fnode_conn = iscsi_dev_to_flash_conn(dev);
+	err = transport->login_flashnode(fnode_sess, fnode_conn);
+
+put_host:
+	scsi_host_put(shost);
+
+exit_login_fnode:
+	return err;
+}
+
+static int iscsi_logout_flashnode(struct iscsi_transport *transport,
+				  struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_bus_flash_session *fnode_sess;
+	struct iscsi_bus_flash_conn *fnode_conn;
+	struct device *dev;
+	uint32_t *idx;
+	int err = 0;
+
+	if (!transport->logout_flashnode) {
+		err = -ENOSYS;
+		goto exit_logout_fnode;
+	}
+
+	shost = scsi_host_lookup(ev->u.logout_flashnode.host_no);
+	if (!shost) {
+		pr_err("%s could not find host no %u\n",
+		       __func__, ev->u.logout_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	idx = &ev->u.logout_flashnode.flashnode_idx;
+	fnode_sess = iscsi_get_flashnode_by_index(shost, idx,
+						  flashnode_match_index);
+	if (!fnode_sess) {
+		pr_err("%s could not find flashnode %u for host no %u\n",
+		       __func__, *idx, ev->u.logout_flashnode.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	dev = iscsi_find_flashnode_conn(fnode_sess, NULL,
+					iscsi_is_flashnode_conn_dev);
+	if (!dev) {
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	fnode_conn = iscsi_dev_to_flash_conn(dev);
+
+	err = transport->logout_flashnode(fnode_sess, fnode_conn);
+
+put_host:
+	scsi_host_put(shost);
+
+exit_logout_fnode:
+	return err;
+}
+
+static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
+				      struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_cls_session *session;
+	int err = 0;
+
+	if (!transport->logout_flashnode_sid) {
+		err = -ENOSYS;
+		goto exit_logout_sid;
+	}
+
+	shost = scsi_host_lookup(ev->u.logout_flashnode_sid.host_no);
+	if (!shost) {
+		pr_err("%s could not find host no %u\n",
+		       __func__, ev->u.logout_flashnode_sid.host_no);
+		err = -ENODEV;
+		goto put_host;
+	}
+
+	session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
+	if (!session) {
+		pr_err("%s could not find session id %u\n",
+		       __func__, ev->u.logout_flashnode_sid.sid);
+		err = -EINVAL;
+		goto put_host;
+	}
+
+	err = transport->logout_flashnode_sid(session);
+
+put_host:
+	scsi_host_put(shost);
+
+exit_logout_sid:
+	return err;
+}
+
 static int
 iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 {
@@ -2246,6 +3218,27 @@
 	case ISCSI_UEVENT_DELETE_CHAP:
 		err = iscsi_delete_chap(transport, ev);
 		break;
+	case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
+		err = iscsi_set_flashnode_param(transport, ev,
+						nlmsg_attrlen(nlh,
+							      sizeof(*ev)));
+		break;
+	case ISCSI_UEVENT_NEW_FLASHNODE:
+		err = iscsi_new_flashnode(transport, ev,
+					  nlmsg_attrlen(nlh, sizeof(*ev)));
+		break;
+	case ISCSI_UEVENT_DEL_FLASHNODE:
+		err = iscsi_del_flashnode(transport, ev);
+		break;
+	case ISCSI_UEVENT_LOGIN_FLASHNODE:
+		err = iscsi_login_flashnode(transport, ev);
+		break;
+	case ISCSI_UEVENT_LOGOUT_FLASHNODE:
+		err = iscsi_logout_flashnode(transport, ev);
+		break;
+	case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:
+		err = iscsi_logout_flashnode_sid(transport, ev);
+		break;
 	default:
 		err = -ENOSYS;
 		break;
@@ -2981,10 +3974,14 @@
 	if (err)
 		goto unregister_conn_class;
 
+	err = bus_register(&iscsi_flashnode_bus);
+	if (err)
+		goto unregister_session_class;
+
 	nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, &cfg);
 	if (!nls) {
 		err = -ENOBUFS;
-		goto unregister_session_class;
+		goto unregister_flashnode_bus;
 	}
 
 	iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh");
@@ -2995,6 +3992,8 @@
 
 release_nls:
 	netlink_kernel_release(nls);
+unregister_flashnode_bus:
+	bus_unregister(&iscsi_flashnode_bus);
 unregister_session_class:
 	transport_class_unregister(&iscsi_session_class);
 unregister_conn_class:
@@ -3014,6 +4013,7 @@
 {
 	destroy_workqueue(iscsi_eh_timer_workq);
 	netlink_kernel_release(nls);
+	bus_unregister(&iscsi_flashnode_bus);
 	transport_class_unregister(&iscsi_connection_class);
 	transport_class_unregister(&iscsi_session_class);
 	transport_class_unregister(&iscsi_host_class);
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 5b9a2cf..13a9240 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -15,6 +15,7 @@
 				iscsi_target_util.o \
 				iscsi_target.o \
 				iscsi_target_configfs.o \
-				iscsi_target_stat.o
+				iscsi_target_stat.o \
+				iscsi_target_transport.o
 
 obj-$(CONFIG_ISCSI_TARGET)	+= iscsi_target_mod.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7ea246a..ffbc6a9 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -49,6 +49,8 @@
 #include "iscsi_target_device.h"
 #include "iscsi_target_stat.h"
 
+#include <target/iscsi/iscsi_transport.h>
+
 static LIST_HEAD(g_tiqn_list);
 static LIST_HEAD(g_np_list);
 static DEFINE_SPINLOCK(tiqn_lock);
@@ -68,8 +70,7 @@
 struct kmem_cache *lio_r2t_cache;
 
 static int iscsit_handle_immediate_data(struct iscsi_cmd *,
-			unsigned char *buf, u32);
-static int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+			struct iscsi_scsi_req *, u32);
 
 struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
 {
@@ -401,8 +402,7 @@
 	spin_unlock_bh(&np_lock);
 
 	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
-		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
-		"TCP" : "SCTP");
+		np->np_ip, np->np_port, np->np_transport->name);
 
 	return np;
 }
@@ -441,11 +441,10 @@
 	return 0;
 }
 
-static int iscsit_del_np_comm(struct iscsi_np *np)
+static void iscsit_free_np(struct iscsi_np *np)
 {
 	if (np->np_socket)
 		sock_release(np->np_socket);
-	return 0;
 }
 
 int iscsit_del_np(struct iscsi_np *np)
@@ -467,20 +466,47 @@
 		send_sig(SIGINT, np->np_thread, 1);
 		kthread_stop(np->np_thread);
 	}
-	iscsit_del_np_comm(np);
+
+	np->np_transport->iscsit_free_np(np);
 
 	spin_lock_bh(&np_lock);
 	list_del(&np->np_list);
 	spin_unlock_bh(&np_lock);
 
 	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
-		np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
-		"TCP" : "SCTP");
+		np->np_ip, np->np_port, np->np_transport->name);
 
+	iscsit_put_transport(np->np_transport);
 	kfree(np);
 	return 0;
 }
 
+static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+
+static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	return 0;
+}
+
+static struct iscsit_transport iscsi_target_transport = {
+	.name			= "iSCSI/TCP",
+	.transport_type		= ISCSI_TCP,
+	.owner			= NULL,
+	.iscsit_setup_np	= iscsit_setup_np,
+	.iscsit_accept_np	= iscsit_accept_np,
+	.iscsit_free_np		= iscsit_free_np,
+	.iscsit_alloc_cmd	= iscsit_alloc_cmd,
+	.iscsit_get_login_rx	= iscsit_get_login_rx,
+	.iscsit_put_login_tx	= iscsit_put_login_tx,
+	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
+	.iscsit_immediate_queue	= iscsit_immediate_queue,
+	.iscsit_response_queue	= iscsit_response_queue,
+	.iscsit_queue_data_in	= iscsit_queue_rsp,
+	.iscsit_queue_status	= iscsit_queue_rsp,
+};
+
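For comparison, a sketch of how a second transport module would plug into the same hook table via iscsit_register_transport(); only callbacks whose signatures appear in this patch are filled in, and all names here are hypothetical:

#include <linux/module.h>
#include <target/iscsi/iscsi_transport.h>

static int example_queue_data_in(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	/* Hand the prepared DataIN off to the offload hardware. */
	return 0;
}

static int example_queue_status(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	/* Hand the SCSI response off to the offload hardware. */
	return 0;
}

static struct iscsit_transport example_transport = {
	.name			= "Example/Offload",
	.transport_type		= ISCSI_INFINIBAND,
	.owner			= THIS_MODULE,
	/* remaining np setup/accept/free, login and queue callbacks elided */
	.iscsit_queue_data_in	= example_queue_data_in,
	.iscsit_queue_status	= example_queue_status,
};

static int __init example_transport_init(void)
{
	iscsit_register_transport(&example_transport);
	return 0;
}
module_init(example_transport_init);

static void __exit example_transport_exit(void)
{
	iscsit_unregister_transport(&example_transport);
}
module_exit(example_transport_exit);

MODULE_LICENSE("GPL");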
 static int __init iscsi_target_init_module(void)
 {
 	int ret = 0;
@@ -557,6 +583,8 @@
 		goto ooo_out;
 	}
 
+	iscsit_register_transport(&iscsi_target_transport);
+
 	if (iscsit_load_discovery_tpg() < 0)
 		goto r2t_out;
 
@@ -587,6 +615,7 @@
 	iscsi_deallocate_thread_sets();
 	iscsi_thread_set_free();
 	iscsit_release_discovery_tpg();
+	iscsit_unregister_transport(&iscsi_target_transport);
 	kmem_cache_destroy(lio_cmd_cache);
 	kmem_cache_destroy(lio_qr_cache);
 	kmem_cache_destroy(lio_dr_cache);
@@ -682,11 +711,20 @@
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 
 	ret = wait_for_completion_interruptible(&cmd->reject_comp);
+	/*
+	 * Perform the kref_put now if se_cmd has already been set up by
+	 * iscsit_setup_scsi_cmd()
+	 */
+	if (cmd->se_cmd.se_tfo != NULL) {
+		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
+		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+	}
 	if (ret != 0)
 		return -1;
 
 	return (!fail_conn) ? 0 : -1;
 }
+EXPORT_SYMBOL(iscsit_add_reject_from_cmd);
 
 /*
  * Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -745,6 +783,9 @@
 
 	conn->exp_statsn = exp_statsn;
 
+	if (conn->sess->sess_ops->RDMAExtensions)
+		return;
+
 	spin_lock_bh(&conn->cmd_lock);
 	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
 		spin_lock(&cmd->istate_lock);
@@ -777,12 +818,10 @@
 	return 0;
 }
 
-static int iscsit_handle_scsi_cmd(
-	struct iscsi_conn *conn,
-	unsigned char *buf)
+int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			  unsigned char *buf)
 {
-	int data_direction, payload_length, cmdsn_ret = 0, immed_ret;
-	struct iscsi_cmd *cmd = NULL;
+	int data_direction, payload_length;
 	struct iscsi_scsi_req *hdr;
 	int iscsi_task_attr;
 	int sam_task_attr;
@@ -805,8 +844,8 @@
 	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
 		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
 				" not set. Bad iSCSI Initiator.\n");
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 1, buf, cmd);
 	}
 
 	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
@@ -826,8 +865,8 @@
 		pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 			" set when Expected Data Transfer Length is 0 for"
 			" CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 1, buf, cmd);
 	}
 done:
 
@@ -836,29 +875,29 @@
 		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
 			" MUST be set if Expected Data Transfer Length is not 0."
 			" Bad iSCSI Initiator\n");
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 1, buf, cmd);
 	}
 
 	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
 	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
 		pr_err("Bidirectional operations not supported!\n");
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 1, buf, cmd);
 	}
 
 	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
 		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
 				" Scsi Command PDU.\n");
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 1, buf, cmd);
 	}
 
 	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
 		pr_err("ImmediateData=No but DataSegmentLength=%u,"
 			" protocol error.\n", payload_length);
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+				1, 1, buf, cmd);
 	}
 
 	if ((be32_to_cpu(hdr->data_length )== payload_length) &&
@@ -866,43 +905,38 @@
 		pr_err("Expected Data Transfer Length and Length of"
 			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
 			" bit is not set protocol error\n");
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+				1, 1, buf, cmd);
 	}
 
 	if (payload_length > be32_to_cpu(hdr->data_length)) {
 		pr_err("DataSegmentLength: %u is greater than"
 			" EDTL: %u, protocol error.\n", payload_length,
 				hdr->data_length);
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+				1, 1, buf, cmd);
 	}
 
 	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
 		pr_err("DataSegmentLength: %u is greater than"
 			" MaxXmitDataSegmentLength: %u, protocol error.\n",
 			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-				buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+				1, 1, buf, cmd);
 	}
 
 	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
 		pr_err("DataSegmentLength: %u is greater than"
 			" FirstBurstLength: %u, protocol error.\n",
 			payload_length, conn->sess->sess_ops->FirstBurstLength);
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1,
-					buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_BOOKMARK_INVALID,
+				1, 1, buf, cmd);
 	}
 
 	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
 			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
 			  DMA_NONE;
 
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
-	if (!cmd)
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
-					 buf, conn);
-
 	cmd->data_direction = data_direction;
 	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
 	/*
@@ -945,7 +979,8 @@
 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
 	cmd->first_burst_len	= payload_length;
 
-	if (cmd->data_direction == DMA_FROM_DEVICE) {
+	if (!conn->sess->sess_ops->RDMAExtensions &&
+	     cmd->data_direction == DMA_FROM_DEVICE) {
 		struct iscsi_datain_req *dr;
 
 		dr = iscsit_allocate_datain_req();
@@ -967,7 +1002,10 @@
 
 	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
 		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
-		hdr->cmdsn, hdr->data_length, payload_length, conn->cid);
+		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+		conn->cid);
+
+	target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
 
 	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
 						     scsilun_to_int(&hdr->lun));
@@ -1001,12 +1039,24 @@
 	 */
 	core_alua_check_nonop_delay(&cmd->se_cmd);
 
-	if (iscsit_allocate_iovecs(cmd) < 0) {
-		return iscsit_add_reject_from_cmd(
-				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-				1, 0, buf, cmd);
-	}
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
 
+void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *cmd)
+{
+	iscsit_set_dataout_sequence_values(cmd);
+
+	spin_lock_bh(&cmd->dataout_timeout_lock);
+	iscsit_start_dataout_timer(cmd, cmd->conn);
+	spin_unlock_bh(&cmd->dataout_timeout_lock);
+}
+EXPORT_SYMBOL(iscsit_set_unsoliticed_dataout);
+
+int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			    struct iscsi_scsi_req *hdr)
+{
+	int cmdsn_ret = 0;
 	/*
 	 * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
 	 * the Immediate Bit is not set, and no Immediate
@@ -1019,12 +1069,17 @@
 	 */
 	if (!cmd->immediate_data) {
 		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
-		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+		if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+			if (!cmd->sense_reason)
+				return 0;
+
+			target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
 			return 0;
-		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_PROTOCOL_ERROR,
-				1, 0, buf, cmd);
+				1, 0, (unsigned char *)hdr, cmd);
+		}
 	}
 
 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
@@ -1033,25 +1088,23 @@
 	 * If no Immediate Data is attached, it's OK to return now.
 	 */
 	if (!cmd->immediate_data) {
-		if (!cmd->sense_reason && cmd->unsolicited_data) {
-			iscsit_set_dataout_sequence_values(cmd);
+		if (!cmd->sense_reason && cmd->unsolicited_data)
+			iscsit_set_unsoliticed_dataout(cmd);
+		if (!cmd->sense_reason)
+			return 0;
 
-			spin_lock_bh(&cmd->dataout_timeout_lock);
-			iscsit_start_dataout_timer(cmd, cmd->conn);
-			spin_unlock_bh(&cmd->dataout_timeout_lock);
-		}
-
+		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
 		return 0;
 	}
 
 	/*
-	 * Early CHECK_CONDITIONs never make it to the transport processing
-	 * thread.  They are processed in CmdSN order by
-	 * iscsit_check_received_cmdsn() below.
+	 * Early CHECK_CONDITIONs with ImmediateData never make it to command
+	 * execution.  These exceptions are processed in CmdSN order using
+	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
 	 */
 	if (cmd->sense_reason) {
-		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-		goto after_immediate_data;
+		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+		return 1;
 	}
 	/*
 	 * Call directly into transport_generic_new_cmd() to perform
@@ -1059,11 +1112,27 @@
 	 */
 	cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
 	if (cmd->sense_reason) {
-		immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
-		goto after_immediate_data;
+		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
+		return 1;
 	}
 
-	immed_ret = iscsit_handle_immediate_data(cmd, buf, payload_length);
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_process_scsi_cmd);
+
+static int
+iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+			  bool dump_payload)
+{
+	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+	/*
+	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+	 */
+	if (dump_payload == true)
+		goto after_immediate_data;
+
+	immed_ret = iscsit_handle_immediate_data(cmd, hdr,
+					cmd->first_burst_len);
 after_immediate_data:
 	if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
 		/*
@@ -1071,26 +1140,19 @@
 		 * DataCRC, check against ExpCmdSN/MaxCmdSN if
 		 * Immediate Bit is not set.
 		 */
-		cmdsn_ret = iscsit_sequence_cmd(conn, cmd, hdr->cmdsn);
-		/*
-		 * Special case for Unsupported SAM WRITE Opcodes
-		 * and ImmediateData=Yes.
-		 */
-		if (cmd->sense_reason) {
-			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
-				return -1;
-		} else if (cmd->unsolicited_data) {
-			iscsit_set_dataout_sequence_values(cmd);
+		cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd, hdr->cmdsn);
 
-			spin_lock_bh(&cmd->dataout_timeout_lock);
-			iscsit_start_dataout_timer(cmd, cmd->conn);
-			spin_unlock_bh(&cmd->dataout_timeout_lock);
-		}
+		if (cmd->sense_reason) {
+			if (iscsit_dump_data_payload(cmd->conn,
+					cmd->first_burst_len, 1) < 0)
+				return -1;
+		} else if (cmd->unsolicited_data)
+			iscsit_set_unsoliticed_dataout(cmd);
 
 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
 			return iscsit_add_reject_from_cmd(
 				ISCSI_REASON_PROTOCOL_ERROR,
-				1, 0, buf, cmd);
+				1, 0, (unsigned char *)hdr, cmd);
 
 	} else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
 		/*
@@ -1105,13 +1167,47 @@
 		 * CmdSN and issue a retry to plug the sequence.
 		 */
 		cmd->i_state = ISTATE_REMOVE;
-		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+		iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, cmd->i_state);
 	} else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
 		return -1;
 
 	return 0;
 }
 
+static int
+iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			   unsigned char *buf)
+{
+	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
+	int rc, immed_data;
+	bool dump_payload = false;
+
+	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
+	if (rc < 0)
+		return rc;
+	/*
+	 * Allocate the iovecs needed for struct socket operations for
+	 * traditional iSCSI block I/O.
+	 */
+	if (iscsit_allocate_iovecs(cmd) < 0) {
+		return iscsit_add_reject_from_cmd(
+				ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+				1, 0, buf, cmd);
+	}
+	immed_data = cmd->immediate_data;
+
+	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+	if (rc < 0)
+		return rc;
+	else if (rc > 0)
+		dump_payload = true;
+
+	if (!immed_data)
+		return 0;
+
+	return iscsit_get_immediate_data(cmd, hdr, dump_payload);
+}
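The rewritten TCP handler above also serves as the reference call sequence for the newly exported helpers; below is a hedged sketch of how a non-TCP transport might drive the same split. The function name is hypothetical, and how the immediate-data payload gets drained is transport specific and only hinted at in a comment:

/* Sketch: RX path of a hypothetical offload transport reusing the exported helpers. */
static int example_rx_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			       unsigned char *buf)
{
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int rc;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);	/* header checks, cmd init */
	if (rc < 0)
		return rc;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);	/* CmdSN handling, submit */
	if (rc < 0)
		return rc;
	if (rc > 0) {
		/* Unsupported WRITE with ImmediateData=Yes: the transport must
		 * drain the payload itself before the reject goes out. */
	}
	return 0;
}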
+
 static u32 iscsit_do_crypto_hash_sg(
 	struct hash_desc *hash,
 	struct iscsi_cmd *cmd,
@@ -1174,20 +1270,16 @@
 	crypto_hash_final(hash, data_crc);
 }
 
-static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+int
+iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
+			  struct iscsi_cmd **out_cmd)
 {
-	int iov_ret, ooo_cmdsn = 0, ret;
-	u8 data_crc_failed = 0;
-	u32 checksum, iov_count = 0, padding = 0, rx_got = 0;
-	u32 rx_size = 0, payload_length;
+	struct iscsi_data *hdr = (struct iscsi_data *)buf;
 	struct iscsi_cmd *cmd = NULL;
 	struct se_cmd *se_cmd;
-	struct iscsi_data *hdr;
-	struct kvec *iov;
 	unsigned long flags;
-
-	hdr			= (struct iscsi_data *) buf;
-	payload_length		= ntoh24(hdr->dlength);
+	u32 payload_length = ntoh24(hdr->dlength);
+	int rc;
 
 	if (!payload_length) {
 		pr_err("DataOUT payload is ZERO, protocol error.\n");
@@ -1220,7 +1312,7 @@
 
 	pr_debug("Got DataOut ITT: 0x%08x, TTT: 0x%08x,"
 		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
-		hdr->itt, hdr->ttt, hdr->datasn, hdr->offset,
+		hdr->itt, hdr->ttt, hdr->datasn, ntohl(hdr->offset),
 		payload_length, conn->cid);
 
 	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
@@ -1312,12 +1404,26 @@
 	 * Preform DataSN, DataSequenceInOrder, DataPDUInOrder, and
 	 * within-command recovery checks before receiving the payload.
 	 */
-	ret = iscsit_check_pre_dataout(cmd, buf);
-	if (ret == DATAOUT_WITHIN_COMMAND_RECOVERY)
+	rc = iscsit_check_pre_dataout(cmd, buf);
+	if (rc == DATAOUT_WITHIN_COMMAND_RECOVERY)
 		return 0;
-	else if (ret == DATAOUT_CANNOT_RECOVER)
+	else if (rc == DATAOUT_CANNOT_RECOVER)
 		return -1;
 
+	*out_cmd = cmd;
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_check_dataout_hdr);
+
+static int
+iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+		   struct iscsi_data *hdr)
+{
+	struct kvec *iov;
+	u32 checksum, iov_count = 0, padding = 0, rx_got = 0, rx_size = 0;
+	u32 payload_length = ntoh24(hdr->dlength);
+	int iov_ret, data_crc_failed = 0;
+
 	rx_size += payload_length;
 	iov = &cmd->iov_data[0];
 
@@ -1370,17 +1476,27 @@
 				payload_length);
 		}
 	}
+
+	return data_crc_failed;
+}
+
+int
+iscsit_check_dataout_payload(struct iscsi_cmd *cmd, struct iscsi_data *hdr,
+			     bool data_crc_failed)
+{
+	struct iscsi_conn *conn = cmd->conn;
+	int rc, ooo_cmdsn;
 	/*
 	 * Increment post receive data and CRC values or perform
 	 * within-command recovery.
 	 */
-	ret = iscsit_check_post_dataout(cmd, buf, data_crc_failed);
-	if ((ret == DATAOUT_NORMAL) || (ret == DATAOUT_WITHIN_COMMAND_RECOVERY))
+	rc = iscsit_check_post_dataout(cmd, (unsigned char *)hdr, data_crc_failed);
+	if ((rc == DATAOUT_NORMAL) || (rc == DATAOUT_WITHIN_COMMAND_RECOVERY))
 		return 0;
-	else if (ret == DATAOUT_SEND_R2T) {
+	else if (rc == DATAOUT_SEND_R2T) {
 		iscsit_set_dataout_sequence_values(cmd);
-		iscsit_build_r2ts_for_cmd(cmd, conn, false);
-	} else if (ret == DATAOUT_SEND_TO_TRANSPORT) {
+		conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
+	} else if (rc == DATAOUT_SEND_TO_TRANSPORT) {
 		/*
 		 * Handle extra special case for out of order
 		 * Unsolicited Data Out.
@@ -1401,15 +1517,37 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(iscsit_check_dataout_payload);
 
-static int iscsit_handle_nop_out(
-	struct iscsi_conn *conn,
-	unsigned char *buf)
+static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
+{
+	struct iscsi_cmd *cmd;
+	struct iscsi_data *hdr = (struct iscsi_data *)buf;
+	int rc;
+	bool data_crc_failed = false;
+
+	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
+	if (rc < 0)
+		return rc;
+	else if (!cmd)
+		return 0;
+
+	rc = iscsit_get_dataout(conn, cmd, hdr);
+	if (rc < 0)
+		return rc;
+	else if (rc > 0)
+		data_crc_failed = true;
+
+	return iscsit_check_dataout_payload(cmd, hdr, data_crc_failed);
+}
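Likewise for DataOut: a sketch of a transport that receives the payload through its own machinery and only needs the exported header/payload bookkeeping. The crc_failed flag is assumed to come from the transport's own digest check; the function name is hypothetical:

/* Sketch: DataOut handling when the transport lands the payload itself. */
static int example_rx_data_out(struct iscsi_conn *conn, unsigned char *buf,
			       bool crc_failed)
{
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	struct iscsi_cmd *cmd = NULL;
	int rc;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	if (!cmd)	/* handled by within-command recovery, nothing more to do */
		return 0;

	/* The payload has already been placed in cmd's buffers at this point. */

	return iscsit_check_dataout_payload(cmd, hdr, crc_failed);
}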
+
+int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			unsigned char *buf)
 {
 	unsigned char *ping_data = NULL;
 	int cmdsn_ret, niov = 0, ret = 0, rx_got, rx_size;
 	u32 checksum, data_crc, padding = 0, payload_length;
-	struct iscsi_cmd *cmd = NULL;
+	struct iscsi_cmd *cmd_p = NULL;
 	struct kvec *iov = NULL;
 	struct iscsi_nopout *hdr;
 
@@ -1432,7 +1570,7 @@
 					buf, conn);
 	}
 
-	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%09x,"
+	pr_debug("Got NOPOUT Ping %s ITT: 0x%08x, TTT: 0x%08x,"
 		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
 		hdr->itt == RESERVED_ITT ? "Response" : "Request",
 		hdr->itt, hdr->ttt, hdr->cmdsn, hdr->exp_statsn,
@@ -1445,7 +1583,6 @@
 	 * can contain ping data.
 	 */
 	if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
-		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
 		if (!cmd)
 			return iscsit_add_reject(
 					ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1580,14 +1717,14 @@
 		/*
 		 * This was a response to a unsolicited NOPIN ping.
 		 */
-		cmd = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
-		if (!cmd)
+		cmd_p = iscsit_find_cmd_from_ttt(conn, be32_to_cpu(hdr->ttt));
+		if (!cmd_p)
 			return -1;
 
 		iscsit_stop_nopin_response_timer(conn);
 
-		cmd->i_state = ISTATE_REMOVE;
-		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+		cmd_p->i_state = ISTATE_REMOVE;
+		iscsit_add_cmd_to_immediate_queue(cmd_p, conn, cmd_p->i_state);
 		iscsit_start_nopin_timer(conn);
 	} else {
 		/*
@@ -1611,12 +1748,12 @@
 	kfree(ping_data);
 	return ret;
 }
+EXPORT_SYMBOL(iscsit_handle_nop_out);
 
-static int iscsit_handle_task_mgt_cmd(
-	struct iscsi_conn *conn,
-	unsigned char *buf)
+int
+iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			   unsigned char *buf)
 {
-	struct iscsi_cmd *cmd;
 	struct se_tmr_req *se_tmr;
 	struct iscsi_tmr_req *tmr_req;
 	struct iscsi_tm *hdr;
@@ -1645,18 +1782,13 @@
 		pr_err("Task Management Request TASK_REASSIGN not"
 			" issued as immediate command, bad iSCSI Initiator"
 				"implementation\n");
-		return iscsit_add_reject(ISCSI_REASON_PROTOCOL_ERROR, 1,
-					buf, conn);
+		return iscsit_add_reject_from_cmd(ISCSI_REASON_PROTOCOL_ERROR,
+					1, 1, buf, cmd);
 	}
 	if ((function != ISCSI_TM_FUNC_ABORT_TASK) &&
 	    be32_to_cpu(hdr->refcmdsn) != ISCSI_RESERVED_TAG)
 		hdr->refcmdsn = cpu_to_be32(ISCSI_RESERVED_TAG);
 
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
-	if (!cmd)
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-					 1, buf, conn);
-
 	cmd->data_direction = DMA_NONE;
 
 	cmd->tmr_req = kzalloc(sizeof(struct iscsi_tmr_req), GFP_KERNEL);
@@ -1827,6 +1959,7 @@
 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
 	return 0;
 }
+EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
 
 /* #warning FIXME: Support Text Command parameters besides SendTargets */
 static int iscsit_handle_text_cmd(
@@ -2089,13 +2222,12 @@
 	return 0;
 }
 
-static int iscsit_handle_logout_cmd(
-	struct iscsi_conn *conn,
-	unsigned char *buf)
+int
+iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+			unsigned char *buf)
 {
 	int cmdsn_ret, logout_remove = 0;
 	u8 reason_code = 0;
-	struct iscsi_cmd *cmd;
 	struct iscsi_logout *hdr;
 	struct iscsi_tiqn *tiqn = iscsit_snmp_get_tiqn(conn);
 
@@ -2119,14 +2251,10 @@
 	if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) {
 		pr_err("Received logout request on connection that"
 			" is not in logged in state, ignoring request.\n");
+		iscsit_release_cmd(cmd);
 		return 0;
 	}
 
-	cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
-	if (!cmd)
-		return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES, 1,
-					buf, conn);
-
 	cmd->iscsi_opcode       = ISCSI_OP_LOGOUT;
 	cmd->i_state            = ISTATE_SEND_LOGOUTRSP;
 	cmd->immediate_cmd      = ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
@@ -2176,6 +2304,7 @@
 
 	return logout_remove;
 }
+EXPORT_SYMBOL(iscsit_handle_logout_cmd);
 
 static int iscsit_handle_snack(
 	struct iscsi_conn *conn,
@@ -2243,7 +2372,7 @@
 
 static int iscsit_handle_immediate_data(
 	struct iscsi_cmd *cmd,
-	unsigned char *buf,
+	struct iscsi_scsi_req *hdr,
 	u32 length)
 {
 	int iov_ret, rx_got = 0, rx_size = 0;
@@ -2299,12 +2428,12 @@
 					" in ERL=0.\n");
 				iscsit_add_reject_from_cmd(
 						ISCSI_REASON_DATA_DIGEST_ERROR,
-						1, 0, buf, cmd);
+						1, 0, (unsigned char *)hdr, cmd);
 				return IMMEDIATE_DATA_CANNOT_RECOVER;
 			} else {
 				iscsit_add_reject_from_cmd(
 						ISCSI_REASON_DATA_DIGEST_ERROR,
-						0, 0, buf, cmd);
+						0, 0, (unsigned char *)hdr, cmd);
 				return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
 			}
 		} else {
@@ -2424,18 +2553,60 @@
 	}
 }
 
-static int iscsit_send_data_in(
-	struct iscsi_cmd *cmd,
-	struct iscsi_conn *conn)
+static void
+iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
+			bool set_statsn)
 {
-	int iov_ret = 0, set_statsn = 0;
-	u32 iov_count = 0, tx_size = 0;
+	hdr->opcode		= ISCSI_OP_SCSI_DATA_IN;
+	hdr->flags		= datain->flags;
+	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
+		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
+			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
+			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
+		}
+	}
+	hton24(hdr->dlength, datain->length);
+	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
+				(struct scsi_lun *)&hdr->lun);
+	else
+		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+
+	hdr->itt		= cmd->init_task_tag;
+
+	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
+		hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
+	else
+		hdr->ttt		= cpu_to_be32(0xFFFFFFFF);
+	if (set_statsn)
+		hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	else
+		hdr->statsn		= cpu_to_be32(0xFFFFFFFF);
+
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	hdr->datasn		= cpu_to_be32(datain->data_sn);
+	hdr->offset		= cpu_to_be32(datain->offset);
+
+	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
+		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
+		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
+		ntohl(hdr->offset), datain->length, conn->cid);
+}
+
+static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
 	struct iscsi_datain datain;
 	struct iscsi_datain_req *dr;
-	struct iscsi_data_rsp *hdr;
 	struct kvec *iov;
-	int eodr = 0;
-	int ret;
+	u32 iov_count = 0, tx_size = 0;
+	int eodr = 0, ret, iov_ret;
+	bool set_statsn = false;
 
 	memset(&datain, 0, sizeof(struct iscsi_datain));
 	dr = iscsit_get_datain_values(cmd, &datain);
@@ -2444,7 +2615,6 @@
 				cmd->init_task_tag);
 		return -1;
 	}
-
 	/*
 	 * Be paranoid and double check the logic for now.
 	 */
@@ -2452,7 +2622,7 @@
 		pr_err("Command ITT: 0x%08x, datain.offset: %u and"
 			" datain.length: %u exceeds cmd->data_length: %u\n",
 			cmd->init_task_tag, datain.offset, datain.length,
-				cmd->se_cmd.data_length);
+			cmd->se_cmd.data_length);
 		return -1;
 	}
 
@@ -2476,47 +2646,13 @@
 		    (dr->dr_complete == DATAIN_COMPLETE_CONNECTION_RECOVERY)) {
 			iscsit_increment_maxcmdsn(cmd, conn->sess);
 			cmd->stat_sn = conn->stat_sn++;
-			set_statsn = 1;
+			set_statsn = true;
 		} else if (dr->dr_complete ==
-				DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
-			set_statsn = 1;
+			   DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY)
+			set_statsn = true;
 	}
 
-	hdr	= (struct iscsi_data_rsp *) cmd->pdu;
-	memset(hdr, 0, ISCSI_HDR_LEN);
-	hdr->opcode		= ISCSI_OP_SCSI_DATA_IN;
-	hdr->flags		= datain.flags;
-	if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
-		if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
-			hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
-		} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
-			hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-			hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
-		}
-	}
-	hton24(hdr->dlength, datain.length);
-	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
-		int_to_scsilun(cmd->se_cmd.orig_fe_lun,
-				(struct scsi_lun *)&hdr->lun);
-	else
-		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
-
-	hdr->itt		= cmd->init_task_tag;
-
-	if (hdr->flags & ISCSI_FLAG_DATA_ACK)
-		hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
-	else
-		hdr->ttt		= cpu_to_be32(0xFFFFFFFF);
-	if (set_statsn)
-		hdr->statsn		= cpu_to_be32(cmd->stat_sn);
-	else
-		hdr->statsn		= cpu_to_be32(0xFFFFFFFF);
-
-	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
-	hdr->datasn		= cpu_to_be32(datain.data_sn);
-	hdr->offset		= cpu_to_be32(datain.offset);
+	iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
 
 	iov = &cmd->iov_data[0];
 	iov[iov_count].iov_base	= cmd->pdu;
@@ -2527,7 +2663,7 @@
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
 		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				(unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
 				0, NULL, (u8 *)header_digest);
 
 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2537,7 +2673,8 @@
 			" for DataIN PDU 0x%08x\n", *header_digest);
 	}
 
-	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1], datain.offset, datain.length);
+	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+				datain.offset, datain.length);
 	if (iov_ret < 0)
 		return -1;
 
@@ -2568,11 +2705,6 @@
 	cmd->iov_data_count = iov_count;
 	cmd->tx_size = tx_size;
 
-	pr_debug("Built DataIN ITT: 0x%08x, StatSN: 0x%08x,"
-		" DataSN: 0x%08x, Offset: %u, Length: %u, CID: %hu\n",
-		cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
-		ntohl(hdr->offset), datain.length, conn->cid);
-
 	/* sendpage is preferred but can't insert markers */
 	if (!conn->conn_ops->IFMarker)
 		ret = iscsit_fe_sendpage_sg(cmd, conn);
@@ -2595,16 +2727,13 @@
 	return eodr;
 }
 
-static int iscsit_send_logout_response(
-	struct iscsi_cmd *cmd,
-	struct iscsi_conn *conn)
+int
+iscsit_build_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			struct iscsi_logout_rsp *hdr)
 {
-	int niov = 0, tx_size;
 	struct iscsi_conn *logout_conn = NULL;
 	struct iscsi_conn_recovery *cr = NULL;
 	struct iscsi_session *sess = conn->sess;
-	struct kvec *iov;
-	struct iscsi_logout_rsp *hdr;
 	/*
 	 * The actual shutting down of Sessions and/or Connections
 	 * for CLOSESESSION and CLOSECONNECTION Logout Requests
@@ -2673,9 +2802,6 @@
 		return -1;
 	}
 
-	tx_size = ISCSI_HDR_LEN;
-	hdr			= (struct iscsi_logout_rsp *)cmd->pdu;
-	memset(hdr, 0, ISCSI_HDR_LEN);
 	hdr->opcode		= ISCSI_OP_LOGOUT_RSP;
 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
 	hdr->response		= cmd->logout_response;
@@ -2687,6 +2813,27 @@
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
 	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
 
+	pr_debug("Built Logout Response ITT: 0x%08x StatSN:"
+		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, hdr->response,
+		cmd->logout_cid, conn->cid);
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_build_logout_rsp);
+
+static int
+iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct kvec *iov;
+	int niov = 0, tx_size, rc;
+
+	rc = iscsit_build_logout_rsp(cmd, conn,
+			(struct iscsi_logout_rsp *)&cmd->pdu[0]);
+	if (rc < 0)
+		return rc;
+
+	tx_size = ISCSI_HDR_LEN;
 	iov = &cmd->iov_misc[0];
 	iov[niov].iov_base	= cmd->pdu;
 	iov[niov++].iov_len	= ISCSI_HDR_LEN;
@@ -2695,7 +2842,7 @@
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
 		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				(unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN,
 				0, NULL, (u8 *)header_digest);
 
 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2706,14 +2853,37 @@
 	cmd->iov_misc_count = niov;
 	cmd->tx_size = tx_size;
 
-	pr_debug("Sending Logout Response ITT: 0x%08x StatSN:"
-		" 0x%08x Response: 0x%02x CID: %hu on CID: %hu\n",
-		cmd->init_task_tag, cmd->stat_sn, hdr->response,
-		cmd->logout_cid, conn->cid);
-
 	return 0;
 }
 
+void
+iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+		       struct iscsi_nopin *hdr, bool nopout_response)
+{
+	hdr->opcode		= ISCSI_OP_NOOP_IN;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hton24(hdr->dlength, cmd->buf_ptr_size);
+	if (nopout_response)
+		put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
+	hdr->itt		= cmd->init_task_tag;
+	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
+	cmd->stat_sn		= (nopout_response) ? conn->stat_sn++ :
+				  conn->stat_sn;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+
+	if (nopout_response)
+		iscsit_increment_maxcmdsn(cmd, conn->sess);
+
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+
+	pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
+		" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
+		"Solicited" : "Unsolicited", cmd->init_task_tag,
+		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
+}
+EXPORT_SYMBOL(iscsit_build_nopin_rsp);
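The build/send split above lets an offload driver reuse the PDU construction while posting the header through its own send path. A minimal sketch, with the send step left as a comment; the header buffer is zeroed first since the builder only ORs flags in, and the function name is hypothetical:

/* Sketch: build a solicited NOPIN response for a transport-private send path. */
static void example_send_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];

	memset(hdr, 0, ISCSI_HDR_LEN);
	iscsit_build_nopin_rsp(cmd, conn, hdr, true);

	/* Post hdr plus any ping data via the transport's own send machinery. */
}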
+
 /*
  *	Unsolicited NOPIN, either requesting a response or not.
  */
@@ -2722,20 +2892,10 @@
 	struct iscsi_conn *conn,
 	int want_response)
 {
-	int tx_size = ISCSI_HDR_LEN;
-	struct iscsi_nopin *hdr;
-	int ret;
+	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
+	int tx_size = ISCSI_HDR_LEN, ret;
 
-	hdr			= (struct iscsi_nopin *) cmd->pdu;
-	memset(hdr, 0, ISCSI_HDR_LEN);
-	hdr->opcode		= ISCSI_OP_NOOP_IN;
-	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
-	hdr->itt		= cmd->init_task_tag;
-	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
-	cmd->stat_sn		= conn->stat_sn;
-	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
-	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+	iscsit_build_nopin_rsp(cmd, conn, hdr, false);
 
 	if (conn->conn_ops->HeaderDigest) {
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
@@ -2771,31 +2931,17 @@
 	return 0;
 }
 
-static int iscsit_send_nopin_response(
-	struct iscsi_cmd *cmd,
-	struct iscsi_conn *conn)
+static int
+iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
 {
-	int niov = 0, tx_size;
-	u32 padding = 0;
+	struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
 	struct kvec *iov;
-	struct iscsi_nopin *hdr;
+	u32 padding = 0;
+	int niov = 0, tx_size;
+
+	iscsit_build_nopin_rsp(cmd, conn, hdr, true);
 
 	tx_size = ISCSI_HDR_LEN;
-	hdr			= (struct iscsi_nopin *) cmd->pdu;
-	memset(hdr, 0, ISCSI_HDR_LEN);
-	hdr->opcode		= ISCSI_OP_NOOP_IN;
-	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
-	hton24(hdr->dlength, cmd->buf_ptr_size);
-	put_unaligned_le64(0xFFFFFFFFFFFFFFFFULL, &hdr->lun);
-	hdr->itt		= cmd->init_task_tag;
-	hdr->ttt		= cpu_to_be32(cmd->targ_xfer_tag);
-	cmd->stat_sn		= conn->stat_sn++;
-	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
-
-	iscsit_increment_maxcmdsn(cmd, conn->sess);
-	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
-
 	iov = &cmd->iov_misc[0];
 	iov[niov].iov_base	= cmd->pdu;
 	iov[niov++].iov_len	= ISCSI_HDR_LEN;
@@ -2851,10 +2997,6 @@
 	cmd->iov_misc_count = niov;
 	cmd->tx_size = tx_size;
 
-	pr_debug("Sending NOPIN Response ITT: 0x%08x, TTT:"
-		" 0x%08x, StatSN: 0x%08x, Length %u\n", cmd->init_task_tag,
-		cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
-
 	return 0;
 }
 
@@ -2939,8 +3081,8 @@
  *		connection recovery.
  */
 int iscsit_build_r2ts_for_cmd(
-	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn,
+	struct iscsi_cmd *cmd,
 	bool recovery)
 {
 	int first_r2t = 1;
@@ -3015,24 +3157,16 @@
 	return 0;
 }
 
-static int iscsit_send_status(
-	struct iscsi_cmd *cmd,
-	struct iscsi_conn *conn)
+void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
 {
-	u8 iov_count = 0, recovery;
-	u32 padding = 0, tx_size = 0;
-	struct iscsi_scsi_rsp *hdr;
-	struct kvec *iov;
-
-	recovery = (cmd->i_state != ISTATE_SEND_STATUS);
-	if (!recovery)
+	if (inc_stat_sn)
 		cmd->stat_sn = conn->stat_sn++;
 
 	spin_lock_bh(&conn->sess->session_stats_lock);
 	conn->sess->rsp_pdus++;
 	spin_unlock_bh(&conn->sess->session_stats_lock);
 
-	hdr			= (struct iscsi_scsi_rsp *) cmd->pdu;
 	memset(hdr, 0, ISCSI_HDR_LEN);
 	hdr->opcode		= ISCSI_OP_SCSI_CMD_RSP;
 	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
@@ -3052,6 +3186,23 @@
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
 	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
 
+	pr_debug("Built SCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
+		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status,
+		cmd->se_cmd.scsi_status, conn->cid);
+}
+EXPORT_SYMBOL(iscsit_build_rsp_pdu);
+
+static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
+	struct kvec *iov;
+	u32 padding = 0, tx_size = 0;
+	int iov_count = 0;
+	bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
+
+	iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
+
 	iov = &cmd->iov_misc[0];
 	iov[iov_count].iov_base	= cmd->pdu;
 	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
@@ -3106,7 +3257,7 @@
 		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
 		iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
-				(unsigned char *)hdr, ISCSI_HDR_LEN,
+				(unsigned char *)cmd->pdu, ISCSI_HDR_LEN,
 				0, NULL, (u8 *)header_digest);
 
 		iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3118,11 +3269,6 @@
 	cmd->iov_misc_count = iov_count;
 	cmd->tx_size = tx_size;
 
-	pr_debug("Built %sSCSI Response, ITT: 0x%08x, StatSN: 0x%08x,"
-		" Response: 0x%02x, SAM Status: 0x%02x, CID: %hu\n",
-		(!recovery) ? "" : "Recovery ", cmd->init_task_tag,
-		cmd->stat_sn, 0x00, cmd->se_cmd.scsi_status, conn->cid);
-
 	return 0;
 }
 
@@ -3145,16 +3291,12 @@
 	}
 }
 
-static int iscsit_send_task_mgt_rsp(
-	struct iscsi_cmd *cmd,
-	struct iscsi_conn *conn)
+void
+iscsit_build_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+			  struct iscsi_tm_rsp *hdr)
 {
 	struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
-	struct iscsi_tm_rsp *hdr;
-	u32 tx_size = 0;
 
-	hdr			= (struct iscsi_tm_rsp *) cmd->pdu;
-	memset(hdr, 0, ISCSI_HDR_LEN);
 	hdr->opcode		= ISCSI_OP_SCSI_TMFUNC_RSP;
 	hdr->flags		= ISCSI_FLAG_CMD_FINAL;
 	hdr->response		= iscsit_convert_tcm_tmr_rsp(se_tmr);
@@ -3166,6 +3308,20 @@
 	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
 	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
 
+	pr_debug("Built Task Management Response ITT: 0x%08x,"
+		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
+		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
+}
+EXPORT_SYMBOL(iscsit_build_task_mgt_rsp);
+
+static int
+iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
+{
+	struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
+	u32 tx_size = 0;
+
+	iscsit_build_task_mgt_rsp(cmd, conn, hdr);
+
 	cmd->iov_misc[0].iov_base	= cmd->pdu;
 	cmd->iov_misc[0].iov_len	= ISCSI_HDR_LEN;
 	tx_size += ISCSI_HDR_LEN;
@@ -3186,10 +3342,6 @@
 	cmd->iov_misc_count = 1;
 	cmd->tx_size = tx_size;
 
-	pr_debug("Built Task Management Response ITT: 0x%08x,"
-		" StatSN: 0x%08x, Response: 0x%02x, CID: %hu\n",
-		cmd->init_task_tag, cmd->stat_sn, hdr->response, conn->cid);
-
 	return 0;
 }
 
@@ -3385,6 +3537,22 @@
 	return 0;
 }
 
+void
+iscsit_build_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
+		    struct iscsi_reject *hdr)
+{
+	hdr->opcode		= ISCSI_OP_REJECT;
+	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
+	hton24(hdr->dlength, ISCSI_HDR_LEN);
+	hdr->ffffffff		= cpu_to_be32(0xffffffff);
+	cmd->stat_sn		= conn->stat_sn++;
+	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
+	hdr->exp_cmdsn		= cpu_to_be32(conn->sess->exp_cmd_sn);
+	hdr->max_cmdsn		= cpu_to_be32(conn->sess->max_cmd_sn);
+}
+EXPORT_SYMBOL(iscsit_build_reject);
+
 static int iscsit_send_reject(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn)
@@ -3393,18 +3561,9 @@
 	struct iscsi_reject *hdr;
 	struct kvec *iov;
 
-	hdr			= (struct iscsi_reject *) cmd->pdu;
-	hdr->opcode		= ISCSI_OP_REJECT;
-	hdr->flags		|= ISCSI_FLAG_CMD_FINAL;
-	hton24(hdr->dlength, ISCSI_HDR_LEN);
-	hdr->ffffffff		= cpu_to_be32(0xffffffff);
-	cmd->stat_sn		= conn->stat_sn++;
-	hdr->statsn		= cpu_to_be32(cmd->stat_sn);
-	hdr->exp_cmdsn	= cpu_to_be32(conn->sess->exp_cmd_sn);
-	hdr->max_cmdsn	= cpu_to_be32(conn->sess->max_cmd_sn);
+	iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]);
 
 	iov = &cmd->iov_misc[0];
-
 	iov[iov_count].iov_base = cmd->pdu;
 	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
 	iov[iov_count].iov_base = cmd->buf_ptr;
@@ -3501,8 +3660,53 @@
 	set_cpus_allowed_ptr(p, conn->conn_cpumask);
 }
 
-static int handle_immediate_queue(struct iscsi_conn *conn)
+static int
+iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
+	int ret;
+
+	switch (state) {
+	case ISTATE_SEND_R2T:
+		ret = iscsit_send_r2t(cmd, conn);
+		if (ret < 0)
+			goto err;
+		break;
+	case ISTATE_REMOVE:
+		spin_lock_bh(&conn->cmd_lock);
+		list_del(&cmd->i_conn_node);
+		spin_unlock_bh(&conn->cmd_lock);
+
+		iscsit_free_cmd(cmd);
+		break;
+	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+		iscsit_mod_nopin_response_timer(conn);
+		ret = iscsit_send_unsolicited_nopin(cmd, conn, 1);
+		if (ret < 0)
+			goto err;
+		break;
+	case ISTATE_SEND_NOPIN_NO_RESPONSE:
+		ret = iscsit_send_unsolicited_nopin(cmd, conn, 0);
+		if (ret < 0)
+			goto err;
+		break;
+	default:
+		pr_err("Unknown Opcode: 0x%02x ITT:"
+		       " 0x%08x, i_state: %d on CID: %hu\n",
+		       cmd->iscsi_opcode, cmd->init_task_tag, state,
+		       conn->cid);
+		goto err;
+	}
+
+	return 0;
+
+err:
+	return -1;
+}
+
+static int
+iscsit_handle_immediate_queue(struct iscsi_conn *conn)
+{
+	struct iscsit_transport *t = conn->conn_transport;
 	struct iscsi_queue_req *qr;
 	struct iscsi_cmd *cmd;
 	u8 state;
@@ -3514,52 +3718,139 @@
 		state = qr->state;
 		kmem_cache_free(lio_qr_cache, qr);
 
-		switch (state) {
-		case ISTATE_SEND_R2T:
-			ret = iscsit_send_r2t(cmd, conn);
-			if (ret < 0)
-				goto err;
-			break;
-		case ISTATE_REMOVE:
-			if (cmd->data_direction == DMA_TO_DEVICE)
-				iscsit_stop_dataout_timer(cmd);
+		ret = t->iscsit_immediate_queue(conn, cmd, state);
+		if (ret < 0)
+			return ret;
+	}
 
-			spin_lock_bh(&conn->cmd_lock);
-			list_del(&cmd->i_conn_node);
-			spin_unlock_bh(&conn->cmd_lock);
+	return 0;
+}
 
-			iscsit_free_cmd(cmd);
-			continue;
-		case ISTATE_SEND_NOPIN_WANT_RESPONSE:
-			iscsit_mod_nopin_response_timer(conn);
-			ret = iscsit_send_unsolicited_nopin(cmd,
-							    conn, 1);
-			if (ret < 0)
-				goto err;
+static int
+iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+{
+	int ret;
+
+check_rsp_state:
+	switch (state) {
+	case ISTATE_SEND_DATAIN:
+		ret = iscsit_send_datain(cmd, conn);
+		if (ret < 0)
+			goto err;
+		else if (!ret)
+			/* more drs */
+			goto check_rsp_state;
+		else if (ret == 1) {
+			/* all done */
+			spin_lock_bh(&cmd->istate_lock);
+			cmd->i_state = ISTATE_SENT_STATUS;
+			spin_unlock_bh(&cmd->istate_lock);
+
+			if (atomic_read(&conn->check_immediate_queue))
+				return 1;
+
+			return 0;
+		} else if (ret == 2) {
+			/* Still must send status,
+			   SCF_TRANSPORT_TASK_SENSE was set */
+			spin_lock_bh(&cmd->istate_lock);
+			cmd->i_state = ISTATE_SEND_STATUS;
+			spin_unlock_bh(&cmd->istate_lock);
+			state = ISTATE_SEND_STATUS;
+			goto check_rsp_state;
+		}
+
+		break;
+	case ISTATE_SEND_STATUS:
+	case ISTATE_SEND_STATUS_RECOVERY:
+		ret = iscsit_send_response(cmd, conn);
+		break;
+	case ISTATE_SEND_LOGOUTRSP:
+		ret = iscsit_send_logout(cmd, conn);
+		break;
+	case ISTATE_SEND_ASYNCMSG:
+		ret = iscsit_send_conn_drop_async_message(
+			cmd, conn);
+		break;
+	case ISTATE_SEND_NOPIN:
+		ret = iscsit_send_nopin(cmd, conn);
+		break;
+	case ISTATE_SEND_REJECT:
+		ret = iscsit_send_reject(cmd, conn);
+		break;
+	case ISTATE_SEND_TASKMGTRSP:
+		ret = iscsit_send_task_mgt_rsp(cmd, conn);
+		if (ret != 0)
 			break;
-		case ISTATE_SEND_NOPIN_NO_RESPONSE:
-			ret = iscsit_send_unsolicited_nopin(cmd,
-							    conn, 0);
-			if (ret < 0)
-				goto err;
-			break;
-		default:
-			pr_err("Unknown Opcode: 0x%02x ITT:"
-			       " 0x%08x, i_state: %d on CID: %hu\n",
-			       cmd->iscsi_opcode, cmd->init_task_tag, state,
-			       conn->cid);
+		ret = iscsit_tmr_post_handler(cmd, conn);
+		if (ret != 0)
+			iscsit_fall_back_to_erl0(conn->sess);
+		break;
+	case ISTATE_SEND_TEXTRSP:
+		ret = iscsit_send_text_rsp(cmd, conn);
+		break;
+	default:
+		pr_err("Unknown Opcode: 0x%02x ITT:"
+		       " 0x%08x, i_state: %d on CID: %hu\n",
+		       cmd->iscsi_opcode, cmd->init_task_tag,
+		       state, conn->cid);
+		goto err;
+	}
+	if (ret < 0)
+		goto err;
+
+	if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
+		iscsit_tx_thread_wait_for_tcp(conn);
+		iscsit_unmap_iovec(cmd);
+		goto err;
+	}
+	iscsit_unmap_iovec(cmd);
+
+	switch (state) {
+	case ISTATE_SEND_LOGOUTRSP:
+		if (!iscsit_logout_post_handler(cmd, conn))
+			goto restart;
+		/* fall through */
+	case ISTATE_SEND_STATUS:
+	case ISTATE_SEND_ASYNCMSG:
+	case ISTATE_SEND_NOPIN:
+	case ISTATE_SEND_STATUS_RECOVERY:
+	case ISTATE_SEND_TEXTRSP:
+	case ISTATE_SEND_TASKMGTRSP:
+		spin_lock_bh(&cmd->istate_lock);
+		cmd->i_state = ISTATE_SENT_STATUS;
+		spin_unlock_bh(&cmd->istate_lock);
+		break;
+	case ISTATE_SEND_REJECT:
+		if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
+			cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
+			complete(&cmd->reject_comp);
 			goto err;
 		}
+		complete(&cmd->reject_comp);
+		break;
+	default:
+		pr_err("Unknown Opcode: 0x%02x ITT:"
+		       " 0x%08x, i_state: %d on CID: %hu\n",
+		       cmd->iscsi_opcode, cmd->init_task_tag,
+		       cmd->i_state, conn->cid);
+		goto err;
 	}
 
+	if (atomic_read(&conn->check_immediate_queue))
+		return 1;
+
 	return 0;
 
 err:
 	return -1;
+restart:
+	return -EAGAIN;
 }
 
-static int handle_response_queue(struct iscsi_conn *conn)
+static int iscsit_handle_response_queue(struct iscsi_conn *conn)
 {
+	struct iscsit_transport *t = conn->conn_transport;
 	struct iscsi_queue_req *qr;
 	struct iscsi_cmd *cmd;
 	u8 state;
@@ -3570,122 +3861,12 @@
 		state = qr->state;
 		kmem_cache_free(lio_qr_cache, qr);
 
-check_rsp_state:
-		switch (state) {
-		case ISTATE_SEND_DATAIN:
-			ret = iscsit_send_data_in(cmd, conn);
-			if (ret < 0)
-				goto err;
-			else if (!ret)
-				/* more drs */
-				goto check_rsp_state;
-			else if (ret == 1) {
-				/* all done */
-				spin_lock_bh(&cmd->istate_lock);
-				cmd->i_state = ISTATE_SENT_STATUS;
-				spin_unlock_bh(&cmd->istate_lock);
-
-				if (atomic_read(&conn->check_immediate_queue))
-					return 1;
-
-				continue;
-			} else if (ret == 2) {
-				/* Still must send status,
-				   SCF_TRANSPORT_TASK_SENSE was set */
-				spin_lock_bh(&cmd->istate_lock);
-				cmd->i_state = ISTATE_SEND_STATUS;
-				spin_unlock_bh(&cmd->istate_lock);
-				state = ISTATE_SEND_STATUS;
-				goto check_rsp_state;
-			}
-
-			break;
-		case ISTATE_SEND_STATUS:
-		case ISTATE_SEND_STATUS_RECOVERY:
-			ret = iscsit_send_status(cmd, conn);
-			break;
-		case ISTATE_SEND_LOGOUTRSP:
-			ret = iscsit_send_logout_response(cmd, conn);
-			break;
-		case ISTATE_SEND_ASYNCMSG:
-			ret = iscsit_send_conn_drop_async_message(
-				cmd, conn);
-			break;
-		case ISTATE_SEND_NOPIN:
-			ret = iscsit_send_nopin_response(cmd, conn);
-			break;
-		case ISTATE_SEND_REJECT:
-			ret = iscsit_send_reject(cmd, conn);
-			break;
-		case ISTATE_SEND_TASKMGTRSP:
-			ret = iscsit_send_task_mgt_rsp(cmd, conn);
-			if (ret != 0)
-				break;
-			ret = iscsit_tmr_post_handler(cmd, conn);
-			if (ret != 0)
-				iscsit_fall_back_to_erl0(conn->sess);
-			break;
-		case ISTATE_SEND_TEXTRSP:
-			ret = iscsit_send_text_rsp(cmd, conn);
-			break;
-		default:
-			pr_err("Unknown Opcode: 0x%02x ITT:"
-			       " 0x%08x, i_state: %d on CID: %hu\n",
-			       cmd->iscsi_opcode, cmd->init_task_tag,
-			       state, conn->cid);
-			goto err;
-		}
-		if (ret < 0)
-			goto err;
-
-		if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
-			iscsit_tx_thread_wait_for_tcp(conn);
-			iscsit_unmap_iovec(cmd);
-			goto err;
-		}
-		iscsit_unmap_iovec(cmd);
-
-		switch (state) {
-		case ISTATE_SEND_LOGOUTRSP:
-			if (!iscsit_logout_post_handler(cmd, conn))
-				goto restart;
-			/* fall through */
-		case ISTATE_SEND_STATUS:
-		case ISTATE_SEND_ASYNCMSG:
-		case ISTATE_SEND_NOPIN:
-		case ISTATE_SEND_STATUS_RECOVERY:
-		case ISTATE_SEND_TEXTRSP:
-		case ISTATE_SEND_TASKMGTRSP:
-			spin_lock_bh(&cmd->istate_lock);
-			cmd->i_state = ISTATE_SENT_STATUS;
-			spin_unlock_bh(&cmd->istate_lock);
-			break;
-		case ISTATE_SEND_REJECT:
-			if (cmd->cmd_flags & ICF_REJECT_FAIL_CONN) {
-				cmd->cmd_flags &= ~ICF_REJECT_FAIL_CONN;
-				complete(&cmd->reject_comp);
-				goto err;
-			}
-			complete(&cmd->reject_comp);
-			break;
-		default:
-			pr_err("Unknown Opcode: 0x%02x ITT:"
-			       " 0x%08x, i_state: %d on CID: %hu\n",
-			       cmd->iscsi_opcode, cmd->init_task_tag,
-			       cmd->i_state, conn->cid);
-			goto err;
-		}
-
-		if (atomic_read(&conn->check_immediate_queue))
-			return 1;
+		ret = t->iscsit_response_queue(conn, cmd, state);
+		if (ret == 1 || ret < 0)
+			return ret;
 	}
 
 	return 0;
-
-err:
-	return -1;
-restart:
-	return -EAGAIN;
 }
 
 int iscsi_target_tx_thread(void *arg)
@@ -3722,11 +3903,11 @@
 			goto transport_err;
 
 get_immediate:
-		ret = handle_immediate_queue(conn);
+		ret = iscsit_handle_immediate_queue(conn);
 		if (ret < 0)
 			goto transport_err;
 
-		ret = handle_response_queue(conn);
+		ret = iscsit_handle_response_queue(conn);
 		if (ret == 1)
 			goto get_immediate;
 		else if (ret == -EAGAIN)
@@ -3742,6 +3923,83 @@
 	return 0;
 }
 
+static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
+{
+	struct iscsi_hdr *hdr = (struct iscsi_hdr *)buf;
+	struct iscsi_cmd *cmd;
+	int ret = 0;
+
+	switch (hdr->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_SCSI_CMD:
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+						1, buf, conn);
+
+		ret = iscsit_handle_scsi_cmd(conn, cmd, buf);
+		break;
+	case ISCSI_OP_SCSI_DATA_OUT:
+		ret = iscsit_handle_data_out(conn, buf);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		cmd = NULL;
+		if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+			cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+			if (!cmd)
+				return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+						1, buf, conn);
+		}
+		ret = iscsit_handle_nop_out(conn, cmd, buf);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+						1, buf, conn);
+
+		ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
+		break;
+	case ISCSI_OP_TEXT:
+		ret = iscsit_handle_text_cmd(conn, buf);
+		break;
+	case ISCSI_OP_LOGOUT:
+		cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+		if (!cmd)
+			return iscsit_add_reject(ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+						1, buf, conn);
+
+		ret = iscsit_handle_logout_cmd(conn, cmd, buf);
+		if (ret > 0)
+			wait_for_completion_timeout(&conn->conn_logout_comp,
+					SECONDS_FOR_LOGOUT_COMP * HZ);
+		break;
+	case ISCSI_OP_SNACK:
+		ret = iscsit_handle_snack(conn, buf);
+		break;
+	default:
+		pr_err("Got unknown iSCSI OpCode: 0x%02x\n", hdr->opcode);
+		if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+			pr_err("Cannot recover from unknown"
+			" opcode while ERL=0, closing iSCSI connection.\n");
+			return -1;
+		}
+		if (!conn->conn_ops->OFMarker) {
+			pr_err("Unable to recover from unknown"
+			" opcode while OFMarker=No, closing iSCSI"
+				" connection.\n");
+			return -1;
+		}
+		if (iscsit_recover_from_unknown_opcode(conn) < 0) {
+			pr_err("Unable to recover from unknown"
+				" opcode, closing iSCSI connection.\n");
+			return -1;
+		}
+		break;
+	}
+
+	return ret;
+}
+
 int iscsi_target_rx_thread(void *arg)
 {
 	int ret;
@@ -3761,6 +4019,18 @@
 	if (!conn)
 		goto out;
 
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+		struct completion comp;
+		int rc;
+
+		init_completion(&comp);
+		rc = wait_for_completion_interruptible(&comp);
+		if (rc < 0)
+			goto transport_err;
+
+		goto out;
+	}
+
 	while (!kthread_should_stop()) {
 		/*
 		 * Ensure that both TX and RX per connection kthreads
@@ -3832,62 +4102,9 @@
 			goto transport_err;
 		}
 
-		switch (opcode) {
-		case ISCSI_OP_SCSI_CMD:
-			if (iscsit_handle_scsi_cmd(conn, buffer) < 0)
-				goto transport_err;
-			break;
-		case ISCSI_OP_SCSI_DATA_OUT:
-			if (iscsit_handle_data_out(conn, buffer) < 0)
-				goto transport_err;
-			break;
-		case ISCSI_OP_NOOP_OUT:
-			if (iscsit_handle_nop_out(conn, buffer) < 0)
-				goto transport_err;
-			break;
-		case ISCSI_OP_SCSI_TMFUNC:
-			if (iscsit_handle_task_mgt_cmd(conn, buffer) < 0)
-				goto transport_err;
-			break;
-		case ISCSI_OP_TEXT:
-			if (iscsit_handle_text_cmd(conn, buffer) < 0)
-				goto transport_err;
-			break;
-		case ISCSI_OP_LOGOUT:
-			ret = iscsit_handle_logout_cmd(conn, buffer);
-			if (ret > 0) {
-				wait_for_completion_timeout(&conn->conn_logout_comp,
-						SECONDS_FOR_LOGOUT_COMP * HZ);
-				goto transport_err;
-			} else if (ret < 0)
-				goto transport_err;
-			break;
-		case ISCSI_OP_SNACK:
-			if (iscsit_handle_snack(conn, buffer) < 0)
-				goto transport_err;
-			break;
-		default:
-			pr_err("Got unknown iSCSI OpCode: 0x%02x\n",
-					opcode);
-			if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
-				pr_err("Cannot recover from unknown"
-				" opcode while ERL=0, closing iSCSI connection"
-				".\n");
-				goto transport_err;
-			}
-			if (!conn->conn_ops->OFMarker) {
-				pr_err("Unable to recover from unknown"
-				" opcode while OFMarker=No, closing iSCSI"
-					" connection.\n");
-				goto transport_err;
-			}
-			if (iscsit_recover_from_unknown_opcode(conn) < 0) {
-				pr_err("Unable to recover from unknown"
-					" opcode, closing iSCSI connection.\n");
-				goto transport_err;
-			}
-			break;
-		}
+		ret = iscsi_target_rx_opcode(conn, buffer);
+		if (ret < 0)
+			goto transport_err;
 	}
 
 transport_err:
@@ -4053,6 +4270,12 @@
 
 	if (conn->sock)
 		sock_release(conn->sock);
+
+	if (conn->conn_transport->iscsit_free_conn)
+		conn->conn_transport->iscsit_free_conn(conn);
+
+	iscsit_put_transport(conn->conn_transport);
+
 	conn->thread_set = NULL;
 
 	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
@@ -4284,7 +4507,7 @@
 /*
  *	Return of 0 causes the TX thread to restart.
  */
-static int iscsit_logout_post_handler(
+int iscsit_logout_post_handler(
 	struct iscsi_cmd *cmd,
 	struct iscsi_conn *conn)
 {
@@ -4342,6 +4565,7 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL(iscsit_logout_post_handler);
 
 void iscsit_fail_session(struct iscsi_session *sess)
 {
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index b1a1e63..a0050b2 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -16,11 +16,12 @@
 				struct iscsi_portal_group *);
 extern int iscsit_del_np(struct iscsi_np *);
 extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *, struct iscsi_cmd *);
+extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
 extern int iscsit_logout_closesession(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_closeconnection(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_logout_removeconnforrecovery(struct iscsi_cmd *, struct iscsi_conn *);
 extern int iscsit_send_async_msg(struct iscsi_conn *, u16, u8, u8);
-extern int iscsit_build_r2ts_for_cmd(struct iscsi_cmd *, struct iscsi_conn *, bool recovery);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *, bool recovery);
 extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
 extern int iscsi_target_tx_thread(void *);
 extern int iscsi_target_rx_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index a0fc7b9..cee1754 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -49,32 +49,6 @@
 	}
 }
 
-static void chap_set_random(char *data, int length)
-{
-	long r;
-	unsigned n;
-
-	while (length > 0) {
-		get_random_bytes(&r, sizeof(long));
-		r = r ^ (r >> 8);
-		r = r ^ (r >> 4);
-		n = r & 0x7;
-
-		get_random_bytes(&r, sizeof(long));
-		r = r ^ (r >> 8);
-		r = r ^ (r >> 5);
-		n = (n << 3) | (r & 0x7);
-
-		get_random_bytes(&r, sizeof(long));
-		r = r ^ (r >> 8);
-		r = r ^ (r >> 5);
-		n = (n << 2) | (r & 0x3);
-
-		*data++ = n;
-		length--;
-	}
-}
-
 static void chap_gen_challenge(
 	struct iscsi_conn *conn,
 	int caller,
@@ -86,7 +60,7 @@
 
 	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
 
-	chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
+	get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
 	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
 				CHAP_CHALLENGE_LENGTH);
 	/*
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 78d75c8..13e9e71 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -27,6 +27,7 @@
 #include <target/target_core_fabric_configfs.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
+#include <target/iscsi/iscsi_transport.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -124,8 +125,87 @@
 
 TF_NP_BASE_ATTR(lio_target, sctp, S_IRUGO | S_IWUSR);
 
+static ssize_t lio_target_np_show_iser(
+	struct se_tpg_np *se_tpg_np,
+	char *page)
+{
+	struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+				struct iscsi_tpg_np, se_tpg_np);
+	struct iscsi_tpg_np *tpg_np_iser;
+	ssize_t rb;
+
+	tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
+	if (tpg_np_iser)
+		rb = sprintf(page, "1\n");
+	else
+		rb = sprintf(page, "0\n");
+
+	return rb;
+}
+
+static ssize_t lio_target_np_store_iser(
+	struct se_tpg_np *se_tpg_np,
+	const char *page,
+	size_t count)
+{
+	struct iscsi_np *np;
+	struct iscsi_portal_group *tpg;
+	struct iscsi_tpg_np *tpg_np = container_of(se_tpg_np,
+				struct iscsi_tpg_np, se_tpg_np);
+	struct iscsi_tpg_np *tpg_np_iser = NULL;
+	char *endptr;
+	u32 op;
+	int rc;
+
+	op = simple_strtoul(page, &endptr, 0);
+	if ((op != 1) && (op != 0)) {
+		pr_err("Illegal value for iser attribute: %u\n", op);
+		return -EINVAL;
+	}
+	np = tpg_np->tpg_np;
+	if (!np) {
+		pr_err("Unable to locate struct iscsi_np from"
+				" struct iscsi_tpg_np\n");
+		return -EINVAL;
+	}
+
+	tpg = tpg_np->tpg;
+	if (iscsit_get_tpg(tpg) < 0)
+		return -EINVAL;
+
+	if (op) {
+		int rc = request_module("ib_isert");
+		if (rc != 0)
+			pr_warn("Unable to request_module for ib_isert\n");
+
+		tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+				np->np_ip, tpg_np, ISCSI_INFINIBAND);
+		if (!tpg_np_iser || IS_ERR(tpg_np_iser))
+			goto out;
+	} else {
+		tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
+		if (!tpg_np_iser)
+			goto out;
+
+		rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+		if (rc < 0)
+			goto out;
+	}
+
+	printk("lio_target_np_store_iser() done, op: %d\n", op);
+
+	iscsit_put_tpg(tpg);
+	return count;
+out:
+	iscsit_put_tpg(tpg);
+	return -EINVAL;
+}
+
+TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
+
 static struct configfs_attribute *lio_target_portal_attrs[] = {
 	&lio_target_np_sctp.attr,
+	&lio_target_np_iser.attr,
 	NULL,
 };
 
@@ -1536,16 +1616,18 @@
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
 
 	cmd->i_state = ISTATE_SEND_DATAIN;
-	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
+
 	return 0;
 }
 
 static int lio_write_pending(struct se_cmd *se_cmd)
 {
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+	struct iscsi_conn *conn = cmd->conn;
 
 	if (!cmd->immediate_data && !cmd->unsolicited_data)
-		return iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
+		return conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
 
 	return 0;
 }
@@ -1567,7 +1649,8 @@
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
 
 	cmd->i_state = ISTATE_SEND_STATUS;
-	iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
+	cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
+
 	return 0;
 }
 
@@ -1696,11 +1779,17 @@
 	iscsit_set_default_node_attribues(acl);
 }
 
+static int lio_check_stop_free(struct se_cmd *se_cmd)
+{
+	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
 static void lio_release_cmd(struct se_cmd *se_cmd)
 {
 	struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
 
-	iscsit_release_cmd(cmd);
+	pr_debug("Entering lio_release_cmd for se_cmd: %p\n", se_cmd);
+	cmd->release_cmd(cmd);
 }
 
 /* End functions for target_core_fabric_ops */
@@ -1740,6 +1829,7 @@
 	fabric->tf_ops.tpg_alloc_fabric_acl = &lio_tpg_alloc_fabric_acl;
 	fabric->tf_ops.tpg_release_fabric_acl = &lio_tpg_release_fabric_acl;
 	fabric->tf_ops.tpg_get_inst_index = &lio_tpg_get_inst_index;
+	fabric->tf_ops.check_stop_free = &lio_check_stop_free,
 	fabric->tf_ops.release_cmd = &lio_release_cmd;
 	fabric->tf_ops.shutdown_session = &lio_tpg_shutdown_session;
 	fabric->tf_ops.close_session = &lio_tpg_close_session;
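
[Editor's note] The new "iser" network-portal attribute lets userspace flip an existing portal over to the ISCSI_INFINIBAND transport, which in turn request_module()s ib_isert.  A hedged usage sketch from userspace; the configfs path layout is an assumption based on the usual LIO hierarchy, not something the patch defines.

    /* Enable iSER on a portal by writing "1" to its configfs "iser" attribute.
     * Assumed path layout:
     *   /sys/kernel/config/target/iscsi/<iqn>/tpgt_<n>/np/<ip>:<port>/iser */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            const char *path = argc > 1 ? argv[1] :
                    "/sys/kernel/config/target/iscsi/"
                    "iqn.2013-04.org.example:target0/tpgt_1/np/192.168.1.1:3260/iser";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "1", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }
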
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 7a333d2..60ec4b9 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -60,7 +60,7 @@
 
 #define ISCSI_IOV_DATA_BUFFER		5
 
-enum tpg_np_network_transport_table {
+enum iscsit_transport_type {
 	ISCSI_TCP				= 0,
 	ISCSI_SCTP_TCP				= 1,
 	ISCSI_SCTP_UDP				= 2,
@@ -244,6 +244,11 @@
 	u8	IFMarker;			/* [0,1] == [No,Yes] */
 	u32	OFMarkInt;			/* [1..65535] */
 	u32	IFMarkInt;			/* [1..65535] */
+	/*
+	 * iSER specific connection parameters
+	 */
+	u32	InitiatorRecvDataSegmentLength;	/* [512..2**24-1] */
+	u32	TargetRecvDataSegmentLength;	/* [512..2**24-1] */
 };
 
 struct iscsi_sess_ops {
@@ -265,6 +270,10 @@
 	u8	DataSequenceInOrder;		/* [0,1] == [No,Yes] */
 	u8	ErrorRecoveryLevel;		/* [0..2] */
 	u8	SessionType;			/* [0,1] == [Normal,Discovery]*/
+	/*
+	 * iSER specific session parameters
+	 */
+	u8	RDMAExtensions;			/* [0,1] == [No,Yes] */
 };
 
 struct iscsi_queue_req {
@@ -284,6 +293,7 @@
 };
 
 struct iscsi_param_list {
+	bool			iser;
 	struct list_head	param_list;
 	struct list_head	extra_response_list;
 };
@@ -475,6 +485,7 @@
 	u32			first_data_sg_off;
 	u32			kmapped_nents;
 	sense_reason_t		sense_reason;
+	void (*release_cmd)(struct iscsi_cmd *);
 }  ____cacheline_aligned;
 
 struct iscsi_tmr_req {
@@ -503,6 +514,7 @@
 	u16			login_port;
 	u16			local_port;
 	int			net_size;
+	int			login_family;
 	u32			auth_id;
 	u32			conn_flags;
 	/* Used for iscsi_tx_login_rsp() */
@@ -562,9 +574,12 @@
 	struct list_head	immed_queue_list;
 	struct list_head	response_queue_list;
 	struct iscsi_conn_ops	*conn_ops;
+	struct iscsi_login	*conn_login;
+	struct iscsit_transport *conn_transport;
 	struct iscsi_param_list	*param_list;
 	/* Used for per connection auth state machine */
 	void			*auth_protocol;
+	void			*context;
 	struct iscsi_login_thread_s *login_thread;
 	struct iscsi_portal_group *tpg;
 	/* Pointer to parent session */
@@ -663,6 +678,8 @@
 	u8 first_request;
 	u8 version_min;
 	u8 version_max;
+	u8 login_complete;
+	u8 login_failed;
 	char isid[6];
 	u32 cmd_sn;
 	itt_t init_task_tag;
@@ -670,10 +687,11 @@
 	u32 rsp_length;
 	u16 cid;
 	u16 tsih;
-	char *req;
-	char *rsp;
+	char req[ISCSI_HDR_LEN];
+	char rsp[ISCSI_HDR_LEN];
 	char *req_buf;
 	char *rsp_buf;
+	struct iscsi_conn *conn;
 } ____cacheline_aligned;
 
 struct iscsi_node_attrib {
@@ -754,6 +772,8 @@
 	struct task_struct	*np_thread;
 	struct timer_list	np_login_timer;
 	struct iscsi_portal_group *np_login_tpg;
+	void			*np_context;
+	struct iscsit_transport *np_transport;
 	struct list_head	np_list;
 } ____cacheline_aligned;
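
[Editor's note] The core.h changes attach a transport to both the connection (conn_transport) and the portal (np_transport), so the generic code can dispatch I/O through an ops table instead of calling the TCP paths directly (e.g. conn->conn_transport->iscsit_queue_status()).  A self-contained sketch of that dispatch pattern in plain C; the names and fields below are simplified stand-ins, not the real struct iscsit_transport layout.

    #include <stdio.h>

    struct conn;

    struct transport_ops {
            const char *name;
            int (*queue_status)(struct conn *c, int cmd_id);
    };

    struct conn {
            const struct transport_ops *ops;
    };

    static int tcp_queue_status(struct conn *c, int cmd_id)
    {
            printf("TCP: queue status for cmd %d\n", cmd_id);
            return 0;
    }

    static const struct transport_ops tcp_ops = {
            .name = "iSCSI/TCP",
            .queue_status = tcp_queue_status,
    };

    int main(void)
    {
            struct conn c = { .ops = &tcp_ops };

            /* Core code no longer cares which transport backs the connection. */
            return c.ops->queue_status(&c, 42);
    }
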
 
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index bcc4098..1b74033 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -60,8 +60,13 @@
 
 	cmd->maxcmdsn_inc = 1;
 
-	mutex_lock(&sess->cmdsn_mutex);
+	if (!mutex_trylock(&sess->cmdsn_mutex)) {
+		sess->max_cmd_sn += 1;
+		pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
+		return;
+	}
 	sess->max_cmd_sn += 1;
 	pr_debug("Updated MaxCmdSN to 0x%08x\n", sess->max_cmd_sn);
 	mutex_unlock(&sess->cmdsn_mutex);
 }
+EXPORT_SYMBOL(iscsit_increment_maxcmdsn);
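
[Editor's note] iscsit_increment_maxcmdsn() now uses mutex_trylock(), presumably so callers in contexts that must not block (the exported symbol is consumed by transport drivers) still bump MaxCmdSN; when the lock is contended the counter is updated without it.  A pthread analogue of that "try, then fall back" shape, purely illustrative and not a claim about the kernel's locking semantics.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int max_cmd_sn;

    static void increment_maxcmdsn(void)
    {
            if (pthread_mutex_trylock(&lock) != 0) {
                    max_cmd_sn += 1;        /* contended: update without the lock */
                    return;
            }
            max_cmd_sn += 1;
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            increment_maxcmdsn();
            printf("MaxCmdSN: 0x%08x\n", max_cmd_sn);
            return 0;
    }
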
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 0b52a23..7816af6 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -22,6 +22,7 @@
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
@@ -53,6 +54,9 @@
 	u32 length, padding, offset = 0, size;
 	struct kvec iov;
 
+	if (conn->sess->sess_ops->RDMAExtensions)
+		return 0;
+
 	length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
 
 	buf = kzalloc(length, GFP_ATOMIC);
@@ -919,6 +923,7 @@
 int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
 {
 	struct se_cmd *se_cmd = &cmd->se_cmd;
+	struct iscsi_conn *conn = cmd->conn;
 	int lr = 0;
 
 	spin_lock_bh(&cmd->istate_lock);
@@ -981,7 +986,7 @@
 					return 0;
 
 				iscsit_set_dataout_sequence_values(cmd);
-				iscsit_build_r2ts_for_cmd(cmd, cmd->conn, false);
+				conn->conn_transport->iscsit_get_dataout(conn, cmd, false);
 			}
 			return 0;
 		}
@@ -999,10 +1004,7 @@
 			if (transport_check_aborted_status(se_cmd, 1) != 0)
 				return 0;
 
-			iscsit_set_dataout_sequence_values(cmd);
-			spin_lock_bh(&cmd->dataout_timeout_lock);
-			iscsit_start_dataout_timer(cmd, cmd->conn);
-			spin_unlock_bh(&cmd->dataout_timeout_lock);
+			iscsit_set_unsoliticed_dataout(cmd);
 		}
 		return transport_handle_cdb_direct(&cmd->se_cmd);
 
@@ -1290,3 +1292,4 @@
 			cmd->init_task_tag);
 	spin_unlock_bh(&cmd->dataout_timeout_lock);
 }
+EXPORT_SYMBOL(iscsit_stop_dataout_timer);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 2535d4d..bb5d5c5 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -39,8 +39,39 @@
 #include "iscsi_target.h"
 #include "iscsi_target_parameters.h"
 
-static int iscsi_login_init_conn(struct iscsi_conn *conn)
+#include <target/iscsi/iscsi_transport.h>
+
+static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
 {
+	struct iscsi_login *login;
+
+	login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
+	if (!login) {
+		pr_err("Unable to allocate memory for struct iscsi_login.\n");
+		return NULL;
+	}
+	login->conn = conn;
+	login->first_request = 1;
+
+	login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+	if (!login->req_buf) {
+		pr_err("Unable to allocate memory for request buffer.\n");
+		goto out_login;
+	}
+
+	login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
+	if (!login->rsp_buf) {
+		pr_err("Unable to allocate memory for response buffer.\n");
+		goto out_req_buf;
+	}
+
+	conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+	if (!conn->conn_ops) {
+		pr_err("Unable to allocate memory for"
+			" struct iscsi_conn_ops.\n");
+		goto out_rsp_buf;
+	}
+
 	init_waitqueue_head(&conn->queues_wq);
 	INIT_LIST_HEAD(&conn->conn_list);
 	INIT_LIST_HEAD(&conn->conn_cmd_list);
@@ -62,10 +93,21 @@
 
 	if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
 		pr_err("Unable to allocate conn->conn_cpumask\n");
-		return -ENOMEM;
+		goto out_conn_ops;
 	}
+	conn->conn_login = login;
 
-	return 0;
+	return login;
+
+out_conn_ops:
+	kfree(conn->conn_ops);
+out_rsp_buf:
+	kfree(login->rsp_buf);
+out_req_buf:
+	kfree(login->req_buf);
+out_login:
+	kfree(login);
+	return NULL;
 }
 
 /*
@@ -298,6 +340,7 @@
 	struct iscsi_node_attrib *na;
 	struct iscsi_session *sess = conn->sess;
 	unsigned char buf[32];
+	bool iser = false;
 
 	sess->tpg = conn->tpg;
 
@@ -319,7 +362,10 @@
 		return -1;
 	}
 
-	iscsi_set_keys_to_negotiate(0, conn->param_list);
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+		iser = true;
+
+	iscsi_set_keys_to_negotiate(conn->param_list, iser);
 
 	if (sess->sess_ops->SessionType)
 		return iscsi_set_keys_irrelevant_for_discovery(
@@ -357,6 +403,56 @@
 
 	if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
 		return -1;
+	/*
+	 * Set RDMAExtensions=Yes by default for iSER enabled network portals
+	 */
+	if (iser) {
+		struct iscsi_param *param;
+		unsigned long mrdsl, off;
+		int rc;
+
+		sprintf(buf, "RDMAExtensions=Yes");
+		if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+		/*
+		 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
+	 * Immediate Data + Unsolicited Data-OUT if necessary..
+		 */
+		param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
+						  conn->param_list);
+		if (!param) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+		rc = strict_strtoul(param->value, 0, &mrdsl);
+		if (rc < 0) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+		off = mrdsl % PAGE_SIZE;
+		if (!off)
+			return 0;
+
+		if (mrdsl < PAGE_SIZE)
+			mrdsl = PAGE_SIZE;
+		else
+			mrdsl -= off;
+
+		pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
+			" to PAGE_SIZE\n", mrdsl);
+
+		sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl);
+		if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
+			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+				ISCSI_LOGIN_STATUS_NO_RESOURCES);
+			return -1;
+		}
+	}
 
 	return 0;
 }
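
[Editor's note] The iSER branch above rounds the initiator-proposed MaxRecvDataSegmentLength down to a PAGE_SIZE multiple, with a one-page floor.  A standalone check of that arithmetic (PAGE_SIZE assumed to be 4096 here for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long align_mrdsl(unsigned long mrdsl)
    {
            unsigned long off = mrdsl % PAGE_SIZE;

            if (!off)
                    return mrdsl;           /* already page aligned */
            if (mrdsl < PAGE_SIZE)
                    return PAGE_SIZE;       /* floor at one page */
            return mrdsl - off;             /* round down */
    }

    int main(void)
    {
            unsigned long samples[] = { 512, 4096, 8192, 262144, 262145, 1048577 };
            int i;

            for (i = 0; i < 6; i++)
                    printf("%lu -> %lu\n", samples[i], align_mrdsl(samples[i]));
            return 0;
    }
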
@@ -436,6 +532,7 @@
 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
 	struct se_session *se_sess, *se_sess_tmp;
 	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+	bool iser = false;
 
 	spin_lock_bh(&se_tpg->session_lock);
 	list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
@@ -485,7 +582,10 @@
 		return -1;
 	}
 
-	iscsi_set_keys_to_negotiate(0, conn->param_list);
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+		iser = true;
+
+	iscsi_set_keys_to_negotiate(conn->param_list, iser);
 	/*
 	 * Need to send TargetPortalGroupTag back in first login response
 	 * on any iSCSI connection where the Initiator provides TargetName.
@@ -574,6 +674,11 @@
 static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
 {
 	struct iscsi_session *sess = conn->sess;
+	/*
+	 * FIXME: Unsolicited NopIN support for ISER
+	 */
+	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
+		return;
 
 	if (!sess->sess_ops->SessionType)
 		iscsit_start_nopin_timer(conn);
@@ -632,6 +737,7 @@
 		spin_unlock_bh(&sess->conn_lock);
 
 		iscsi_post_login_start_timers(conn);
+
 		iscsi_activate_thread_set(conn, ts);
 		/*
 		 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -761,11 +867,11 @@
 	spin_unlock_bh(&np->np_thread_lock);
 }
 
-int iscsi_target_setup_login_socket(
+int iscsit_setup_np(
 	struct iscsi_np *np,
 	struct __kernel_sockaddr_storage *sockaddr)
 {
-	struct socket *sock;
+	struct socket *sock = NULL;
 	int backlog = 5, ret, opt = 0, len;
 
 	switch (np->np_network_transport) {
@@ -781,15 +887,15 @@
 		np->np_ip_proto = IPPROTO_SCTP;
 		np->np_sock_type = SOCK_SEQPACKET;
 		break;
-	case ISCSI_IWARP_TCP:
-	case ISCSI_IWARP_SCTP:
-	case ISCSI_INFINIBAND:
 	default:
 		pr_err("Unsupported network_transport: %d\n",
 				np->np_network_transport);
 		return -EINVAL;
 	}
 
+	np->np_ip_proto = IPPROTO_TCP;
+	np->np_sock_type = SOCK_STREAM;
+
 	ret = sock_create(sockaddr->ss_family, np->np_sock_type,
 			np->np_ip_proto, &sock);
 	if (ret < 0) {
@@ -853,7 +959,6 @@
 	}
 
 	return 0;
-
 fail:
 	np->np_socket = NULL;
 	if (sock)
@@ -861,21 +966,169 @@
 	return ret;
 }
 
+int iscsi_target_setup_login_socket(
+	struct iscsi_np *np,
+	struct __kernel_sockaddr_storage *sockaddr)
+{
+	struct iscsit_transport *t;
+	int rc;
+
+	t = iscsit_get_transport(np->np_network_transport);
+	if (!t)
+		return -EINVAL;
+
+	rc = t->iscsit_setup_np(np, sockaddr);
+	if (rc < 0) {
+		iscsit_put_transport(t);
+		return rc;
+	}
+
+	np->np_transport = t;
+	printk("Set np->np_transport to %p -> %s\n", np->np_transport,
+				np->np_transport->name);
+	return 0;
+}
+
+int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+	struct socket *new_sock, *sock = np->np_socket;
+	struct sockaddr_in sock_in;
+	struct sockaddr_in6 sock_in6;
+	int rc, err;
+
+	rc = kernel_accept(sock, &new_sock, 0);
+	if (rc < 0)
+		return rc;
+
+	conn->sock = new_sock;
+	conn->login_family = np->np_sockaddr.ss_family;
+	printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock);
+
+	if (np->np_sockaddr.ss_family == AF_INET6) {
+		memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in6, &err, 1);
+		if (!rc) {
+			snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+				&sock_in6.sin6_addr.in6_u);
+			conn->login_port = ntohs(sock_in6.sin6_port);
+		}
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in6, &err, 0);
+		if (!rc) {
+			snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
+				&sock_in6.sin6_addr.in6_u);
+			conn->local_port = ntohs(sock_in6.sin6_port);
+		}
+	} else {
+		memset(&sock_in, 0, sizeof(struct sockaddr_in));
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in, &err, 1);
+		if (!rc) {
+			sprintf(conn->login_ip, "%pI4",
+					&sock_in.sin_addr.s_addr);
+			conn->login_port = ntohs(sock_in.sin_port);
+		}
+
+		rc = conn->sock->ops->getname(conn->sock,
+				(struct sockaddr *)&sock_in, &err, 0);
+		if (!rc) {
+			sprintf(conn->local_ip, "%pI4",
+					&sock_in.sin_addr.s_addr);
+			conn->local_port = ntohs(sock_in.sin_port);
+		}
+	}
+
+	return 0;
+}
+
+int iscsit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+	struct iscsi_login_req *login_req;
+	u32 padding = 0, payload_length;
+
+	if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
+		return -1;
+
+	login_req = (struct iscsi_login_req *)login->req;
+	payload_length	= ntoh24(login_req->dlength);
+	padding = ((-payload_length) & 3);
+
+	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+		login_req->flags, login_req->itt, login_req->cmdsn,
+		login_req->exp_statsn, login_req->cid, payload_length);
+	/*
+	 * Setup the initial iscsi_login values from the leading
+	 * login request PDU.
+	 */
+	if (login->first_request) {
+		login_req = (struct iscsi_login_req *)login->req;
+		login->leading_connection = (!login_req->tsih) ? 1 : 0;
+		login->current_stage	= ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
+		login->version_min	= login_req->min_version;
+		login->version_max	= login_req->max_version;
+		memcpy(login->isid, login_req->isid, 6);
+		login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
+		login->init_task_tag	= login_req->itt;
+		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+		login->cid		= be16_to_cpu(login_req->cid);
+		login->tsih		= be16_to_cpu(login_req->tsih);
+	}
+
+	if (iscsi_target_check_login_request(conn, login) < 0)
+		return -1;
+
+	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+	if (iscsi_login_rx_data(conn, login->req_buf,
+				payload_length + padding) < 0)
+		return -1;
+
+	return 0;
+}
+
+int iscsit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+			u32 length)
+{
+	if (iscsi_login_tx_data(conn, login->rsp, login->rsp_buf, length) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int
+iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
+{
+	int rc;
+
+	if (!t->owner) {
+		conn->conn_transport = t;
+		return 0;
+	}
+
+	rc = try_module_get(t->owner);
+	if (!rc) {
+		pr_err("try_module_get() failed for %s\n", t->name);
+		return -EINVAL;
+	}
+
+	conn->conn_transport = t;
+	return 0;
+}
+
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
-	u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
-	int err, ret = 0, stop;
+	u8 *buffer, zero_tsih = 0;
+	int ret = 0, rc, stop;
 	struct iscsi_conn *conn = NULL;
 	struct iscsi_login *login;
 	struct iscsi_portal_group *tpg = NULL;
-	struct socket *new_sock, *sock;
-	struct kvec iov;
 	struct iscsi_login_req *pdu;
-	struct sockaddr_in sock_in;
-	struct sockaddr_in6 sock_in6;
 
 	flush_signals(current);
-	sock = np->np_socket;
 
 	spin_lock_bh(&np->np_thread_lock);
 	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
@@ -886,75 +1139,76 @@
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
-	if (kernel_accept(sock, &new_sock, 0) < 0) {
-		spin_lock_bh(&np->np_thread_lock);
-		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
-			spin_unlock_bh(&np->np_thread_lock);
-			complete(&np->np_restart_comp);
-			/* Get another socket */
-			return 1;
-		}
-		spin_unlock_bh(&np->np_thread_lock);
-		goto out;
-	}
-	iscsi_start_login_thread_timer(np);
-
 	conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
 	if (!conn) {
 		pr_err("Could not allocate memory for"
 			" new connection\n");
-		sock_release(new_sock);
 		/* Get another socket */
 		return 1;
 	}
-
 	pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
 	conn->conn_state = TARG_CONN_STATE_FREE;
-	conn->sock = new_sock;
 
-	pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
-	conn->conn_state = TARG_CONN_STATE_XPT_UP;
+	if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
+		kfree(conn);
+		return 1;
+	}
 
-	/*
-	 * Allocate conn->conn_ops early as a failure calling
-	 * iscsit_tx_login_rsp() below will call tx_data().
-	 */
-	conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
-	if (!conn->conn_ops) {
-		pr_err("Unable to allocate memory for"
-			" struct iscsi_conn_ops.\n");
-		goto new_sess_out;
+	rc = np->np_transport->iscsit_accept_np(np, conn);
+	if (rc == -ENOSYS) {
+		complete(&np->np_restart_comp);
+		iscsit_put_transport(conn->conn_transport);
+		kfree(conn);
+		conn = NULL;
+		goto exit;
+	} else if (rc < 0) {
+		spin_lock_bh(&np->np_thread_lock);
+		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+			spin_unlock_bh(&np->np_thread_lock);
+			complete(&np->np_restart_comp);
+			if (ret == -ENODEV) {
+				iscsit_put_transport(conn->conn_transport);
+				kfree(conn);
+				conn = NULL;
+				goto out;
+			}
+			/* Get another socket */
+			return 1;
+		}
+		spin_unlock_bh(&np->np_thread_lock);
+		iscsit_put_transport(conn->conn_transport);
+		kfree(conn);
+		conn = NULL;
+		goto out;
 	}
 	/*
 	 * Perform the remaining iSCSI connection initialization items..
 	 */
-	if (iscsi_login_init_conn(conn) < 0)
-		goto new_sess_out;
-
-	memset(buffer, 0, ISCSI_HDR_LEN);
-	memset(&iov, 0, sizeof(struct kvec));
-	iov.iov_base	= buffer;
-	iov.iov_len	= ISCSI_HDR_LEN;
-
-	if (rx_data(conn, &iov, 1, ISCSI_HDR_LEN) <= 0) {
-		pr_err("rx_data() returned an error.\n");
+	login = iscsi_login_init_conn(conn);
+	if (!login) {
 		goto new_sess_out;
 	}
 
-	iscsi_opcode = (buffer[0] & ISCSI_OPCODE_MASK);
-	if (!(iscsi_opcode & ISCSI_OP_LOGIN)) {
-		pr_err("First opcode is not login request,"
-			" failing login request.\n");
+	iscsi_start_login_thread_timer(np);
+
+	pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+	conn->conn_state = TARG_CONN_STATE_XPT_UP;
+	/*
+	 * This will process the first login request + payload..
+	 */
+	rc = np->np_transport->iscsit_get_login_rx(conn, login);
+	if (rc == 1)
+		return 1;
+	else if (rc < 0)
 		goto new_sess_out;
-	}
 
-	pdu			= (struct iscsi_login_req *) buffer;
-
+	buffer = &login->req[0];
+	pdu = (struct iscsi_login_req *)buffer;
 	/*
 	 * Used by iscsit_tx_login_rsp() for Login Resonses PDUs
 	 * when Status-Class != 0.
 	*/
-	conn->login_itt		= pdu->itt;
+	conn->login_itt	= pdu->itt;
 
 	spin_lock_bh(&np->np_thread_lock);
 	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
@@ -967,61 +1221,11 @@
 	}
 	spin_unlock_bh(&np->np_thread_lock);
 
-	if (np->np_sockaddr.ss_family == AF_INET6) {
-		memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
-
-		if (conn->sock->ops->getname(conn->sock,
-				(struct sockaddr *)&sock_in6, &err, 1) < 0) {
-			pr_err("sock_ops->getname() failed.\n");
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-					ISCSI_LOGIN_STATUS_TARGET_ERROR);
-			goto new_sess_out;
-		}
-		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
-				&sock_in6.sin6_addr.in6_u);
-		conn->login_port = ntohs(sock_in6.sin6_port);
-
-		if (conn->sock->ops->getname(conn->sock,
-				(struct sockaddr *)&sock_in6, &err, 0) < 0) {
-			pr_err("sock_ops->getname() failed.\n");
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-					ISCSI_LOGIN_STATUS_TARGET_ERROR);
-			goto new_sess_out;
-		}
-		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
-				&sock_in6.sin6_addr.in6_u);
-		conn->local_port = ntohs(sock_in6.sin6_port);
-
-	} else {
-		memset(&sock_in, 0, sizeof(struct sockaddr_in));
-
-		if (conn->sock->ops->getname(conn->sock,
-				(struct sockaddr *)&sock_in, &err, 1) < 0) {
-			pr_err("sock_ops->getname() failed.\n");
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-					ISCSI_LOGIN_STATUS_TARGET_ERROR);
-			goto new_sess_out;
-		}
-		sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
-		conn->login_port = ntohs(sock_in.sin_port);
-
-		if (conn->sock->ops->getname(conn->sock,
-				(struct sockaddr *)&sock_in, &err, 0) < 0) {
-			pr_err("sock_ops->getname() failed.\n");
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-					ISCSI_LOGIN_STATUS_TARGET_ERROR);
-			goto new_sess_out;
-		}
-		sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
-		conn->local_port = ntohs(sock_in.sin_port);
-	}
-
 	conn->network_transport = np->np_network_transport;
 
 	pr_debug("Received iSCSI login request from %s on %s Network"
-			" Portal %s:%hu\n", conn->login_ip,
-		(conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
-			conn->local_ip, conn->local_port);
+		" Portal %s:%hu\n", conn->login_ip, np->np_transport->name,
+		conn->local_ip, conn->local_port);
 
 	pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
 	conn->conn_state	= TARG_CONN_STATE_IN_LOGIN;
@@ -1050,13 +1254,17 @@
 		if (iscsi_login_non_zero_tsih_s1(conn, buffer) < 0)
 			goto new_sess_out;
 	}
-
 	/*
-	 * This will process the first login request, and call
-	 * iscsi_target_locate_portal(), and return a valid struct iscsi_login.
+	 * SessionType: Discovery
+	 *
+	 * 	Locates Default Portal
+	 *
+	 * SessionType: Normal
+	 *
+	 * 	Locates Target Portal from NP -> Target IQN
 	 */
-	login = iscsi_target_init_negotiation(np, conn, buffer);
-	if (!login) {
+	rc = iscsi_target_locate_portal(np, conn, login);
+	if (rc < 0) {
 		tpg = conn->tpg;
 		goto new_sess_out;
 	}
@@ -1068,15 +1276,11 @@
 	}
 
 	if (zero_tsih) {
-		if (iscsi_login_zero_tsih_s2(conn) < 0) {
-			iscsi_target_nego_release(login, conn);
+		if (iscsi_login_zero_tsih_s2(conn) < 0)
 			goto new_sess_out;
-		}
 	} else {
-		if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0) {
-			iscsi_target_nego_release(login, conn);
+		if (iscsi_login_non_zero_tsih_s2(conn, buffer) < 0)
 			goto old_sess_out;
-		}
 	}
 
 	if (iscsi_target_start_negotiation(login, conn) < 0)
@@ -1153,8 +1357,18 @@
 		iscsi_release_param_list(conn->param_list);
 		conn->param_list = NULL;
 	}
-	if (conn->sock)
+	iscsi_target_nego_release(conn);
+
+	if (conn->sock) {
 		sock_release(conn->sock);
+		conn->sock = NULL;
+	}
+
+	if (conn->conn_transport->iscsit_free_conn)
+		conn->conn_transport->iscsit_free_conn(conn);
+
+	iscsit_put_transport(conn->conn_transport);
+
 	kfree(conn);
 
 	if (tpg) {
@@ -1172,11 +1386,13 @@
 	/* Wait for another socket.. */
 	if (!stop)
 		return 1;
-
+exit:
 	iscsi_stop_login_thread_timer(np);
 	spin_lock_bh(&np->np_thread_lock);
 	np->np_thread_state = ISCSI_NP_THREAD_EXIT;
+	np->np_thread = NULL;
 	spin_unlock_bh(&np->np_thread_lock);
+
 	return 0;
 }
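
[Editor's note] The new iscsit_get_login_rx() above computes the PDU pad as ((-payload_length) & 3), the usual branch-free trick for rounding a length up to the next 4-byte boundary.  A quick demonstration of what that expression yields:

    #include <stdio.h>

    int main(void)
    {
            unsigned int len;

            for (len = 0; len < 8; len++)
                    printf("len %u -> pad %u (total %u)\n",
                           len, (-len) & 3, len + ((-len) & 3));
            return 0;
    }
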
 
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 091dcae..63efd28 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -4,8 +4,14 @@
 extern int iscsi_login_setup_crypto(struct iscsi_conn *);
 extern int iscsi_check_for_session_reinstatement(struct iscsi_conn *);
 extern int iscsi_login_post_auth_non_zero_tsih(struct iscsi_conn *, u16, u32);
+extern int iscsit_setup_np(struct iscsi_np *,
+				struct __kernel_sockaddr_storage *);
 extern int iscsi_target_setup_login_socket(struct iscsi_np *,
 				struct __kernel_sockaddr_storage *);
+extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsi_target_login_thread(void *);
 extern int iscsi_login_disable_FIM_keys(struct iscsi_param_list *, struct iscsi_conn *);
 
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9d902ae..7ad9120 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -22,6 +22,7 @@
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -169,7 +170,7 @@
 	kfree(conn->auth_protocol);
 }
 
-static int iscsi_target_check_login_request(
+int iscsi_target_check_login_request(
 	struct iscsi_conn *conn,
 	struct iscsi_login *login)
 {
@@ -200,8 +201,8 @@
 		return -1;
 	}
 
-	req_csg = (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
-	req_nsg = (login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE_MASK);
+	req_csg = ISCSI_LOGIN_CURRENT_STAGE(login_req->flags);
+	req_nsg = ISCSI_LOGIN_NEXT_STAGE(login_req->flags);
 
 	if (req_csg != login->current_stage) {
 		pr_err("Initiator unexpectedly changed login stage"
@@ -352,11 +353,8 @@
 
 	padding = ((-login->rsp_length) & 3);
 
-	if (iscsi_login_tx_data(
-			conn,
-			login->rsp,
-			login->rsp_buf,
-			login->rsp_length + padding) < 0)
+	if (conn->conn_transport->iscsit_put_login_tx(conn, login,
+					login->rsp_length + padding) < 0)
 		return -1;
 
 	login->rsp_length		= 0;
@@ -368,72 +366,12 @@
 	return 0;
 }
 
-static int iscsi_target_do_rx_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
-{
-	u32 padding = 0, payload_length;
-	struct iscsi_login_req *login_req;
-
-	if (iscsi_login_rx_data(conn, login->req, ISCSI_HDR_LEN) < 0)
-		return -1;
-
-	login_req = (struct iscsi_login_req *) login->req;
-	payload_length			= ntoh24(login_req->dlength);
-
-	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
-		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
-		 login_req->flags, login_req->itt, login_req->cmdsn,
-		 login_req->exp_statsn, login_req->cid, payload_length);
-
-	if (iscsi_target_check_login_request(conn, login) < 0)
-		return -1;
-
-	padding = ((-payload_length) & 3);
-	memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
-
-	if (iscsi_login_rx_data(
-			conn,
-			login->req_buf,
-			payload_length + padding) < 0)
-		return -1;
-
-	return 0;
-}
-
 static int iscsi_target_do_login_io(struct iscsi_conn *conn, struct iscsi_login *login)
 {
 	if (iscsi_target_do_tx_login_io(conn, login) < 0)
 		return -1;
 
-	if (iscsi_target_do_rx_login_io(conn, login) < 0)
-		return -1;
-
-	return 0;
-}
-
-static int iscsi_target_get_initial_payload(
-	struct iscsi_conn *conn,
-	struct iscsi_login *login)
-{
-	u32 padding = 0, payload_length;
-	struct iscsi_login_req *login_req;
-
-	login_req = (struct iscsi_login_req *) login->req;
-	payload_length = ntoh24(login_req->dlength);
-
-	pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
-		" CmdSN: 0x%08x, ExpStatSN: 0x%08x, Length: %u\n",
-		login_req->flags, login_req->itt, login_req->cmdsn,
-		login_req->exp_statsn, payload_length);
-
-	if (iscsi_target_check_login_request(conn, login) < 0)
-		return -1;
-
-	padding = ((-payload_length) & 3);
-
-	if (iscsi_login_rx_data(
-			conn,
-			login->req_buf,
-			payload_length + padding) < 0)
+	if (conn->conn_transport->iscsit_get_login_rx(conn, login) < 0)
 		return -1;
 
 	return 0;
@@ -681,9 +619,9 @@
 			return -1;
 		}
 
-		switch ((login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2) {
+		switch (ISCSI_LOGIN_CURRENT_STAGE(login_req->flags)) {
 		case 0:
-			login_rsp->flags |= (0 & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK);
+			login_rsp->flags &= ~ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK;
 			if (iscsi_target_handle_csg_zero(conn, login) < 0)
 				return -1;
 			break;
@@ -693,6 +631,7 @@
 				return -1;
 			if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
 				login->tsih = conn->sess->tsih;
+				login->login_complete = 1;
 				if (iscsi_target_do_tx_login_io(conn,
 						login) < 0)
 					return -1;
@@ -702,8 +641,7 @@
 		default:
 			pr_err("Illegal CSG: %d received from"
 				" Initiator, protocol error.\n",
-				(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
-				>> 2);
+				ISCSI_LOGIN_CURRENT_STAGE(login_req->flags));
 			break;
 		}
 
@@ -737,7 +675,7 @@
 /*
  * Processes the first Login Request..
  */
-static int iscsi_target_locate_portal(
+int iscsi_target_locate_portal(
 	struct iscsi_np *np,
 	struct iscsi_conn *conn,
 	struct iscsi_login *login)
@@ -753,22 +691,6 @@
 	login_req = (struct iscsi_login_req *) login->req;
 	payload_length = ntoh24(login_req->dlength);
 
-	login->first_request	= 1;
-	login->leading_connection = (!login_req->tsih) ? 1 : 0;
-	login->current_stage	=
-		(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK) >> 2;
-	login->version_min	= login_req->min_version;
-	login->version_max	= login_req->max_version;
-	memcpy(login->isid, login_req->isid, 6);
-	login->cmd_sn		= be32_to_cpu(login_req->cmdsn);
-	login->init_task_tag	= login_req->itt;
-	login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
-	login->cid		= be16_to_cpu(login_req->cid);
-	login->tsih		= be16_to_cpu(login_req->tsih);
-
-	if (iscsi_target_get_initial_payload(conn, login) < 0)
-		return -1;
-
 	tmpbuf = kzalloc(payload_length + 1, GFP_KERNEL);
 	if (!tmpbuf) {
 		pr_err("Unable to allocate memory for tmpbuf.\n");
@@ -800,6 +722,8 @@
 		start += strlen(key) + strlen(value) + 2;
 	}
 
+	printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
+
 	/*
 	 * See 5.3.  Login Phase.
 	 */
@@ -958,100 +882,30 @@
 	return ret;
 }
 
-struct iscsi_login *iscsi_target_init_negotiation(
-	struct iscsi_np *np,
-	struct iscsi_conn *conn,
-	char *login_pdu)
-{
-	struct iscsi_login *login;
-
-	login = kzalloc(sizeof(struct iscsi_login), GFP_KERNEL);
-	if (!login) {
-		pr_err("Unable to allocate memory for struct iscsi_login.\n");
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		return NULL;
-	}
-
-	login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
-	if (!login->req) {
-		pr_err("Unable to allocate memory for Login Request.\n");
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		goto out;
-	}
-
-	login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
-	if (!login->req_buf) {
-		pr_err("Unable to allocate memory for response buffer.\n");
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		goto out;
-	}
-	/*
-	 * SessionType: Discovery
-	 *
-	 *	Locates Default Portal
-	 *
-	 * SessionType: Normal
-	 *
-	 *	Locates Target Portal from NP -> Target IQN
-	 */
-	if (iscsi_target_locate_portal(np, conn, login) < 0) {
-		goto out;
-	}
-
-	return login;
-out:
-	kfree(login->req);
-	kfree(login->req_buf);
-	kfree(login);
-
-	return NULL;
-}
-
 int iscsi_target_start_negotiation(
 	struct iscsi_login *login,
 	struct iscsi_conn *conn)
 {
-	int ret = -1;
-
-	login->rsp = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
-	if (!login->rsp) {
-		pr_err("Unable to allocate memory for"
-				" Login Response.\n");
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		ret = -1;
-		goto out;
-	}
-
-	login->rsp_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
-	if (!login->rsp_buf) {
-		pr_err("Unable to allocate memory for"
-			" request buffer.\n");
-		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-				ISCSI_LOGIN_STATUS_NO_RESOURCES);
-		ret = -1;
-		goto out;
-	}
+	int ret;
 
 	ret = iscsi_target_do_login(conn, login);
-out:
 	if (ret != 0)
 		iscsi_remove_failed_auth_entry(conn);
 
-	iscsi_target_nego_release(login, conn);
+	iscsi_target_nego_release(conn);
 	return ret;
 }
 
-void iscsi_target_nego_release(
-	struct iscsi_login *login,
-	struct iscsi_conn *conn)
+void iscsi_target_nego_release(struct iscsi_conn *conn)
 {
-	kfree(login->req);
-	kfree(login->rsp);
+	struct iscsi_login *login = conn->conn_login;
+
+	if (!login)
+		return;
+
 	kfree(login->req_buf);
 	kfree(login->rsp_buf);
 	kfree(login);
+
+	conn->conn_login = NULL;
 }
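
[Editor's note] The negotiation code now uses ISCSI_LOGIN_CURRENT_STAGE()/ISCSI_LOGIN_NEXT_STAGE() instead of open-coded shifts: CSG lives in bits 3:2 of the login flags byte and NSG in bits 1:0.  A standalone restatement of what those helpers compute; the mask values mirror iscsi_proto.h, but treat the exact macro bodies here as an illustration rather than a copy.

    #include <stdio.h>
    #include <stdint.h>

    #define LOGIN_CURRENT_STAGE(flags)  (((flags) & 0x0C) >> 2)
    #define LOGIN_NEXT_STAGE(flags)     ((flags) & 0x03)

    int main(void)
    {
            /* Transit bit set, CSG=1 (operational), NSG=3 (full feature phase) */
            uint8_t flags = 0x80 | (1 << 2) | 3;

            printf("CSG=%u NSG=%u\n",
                   LOGIN_CURRENT_STAGE(flags), LOGIN_NEXT_STAGE(flags));
            return 0;
    }
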
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
index 92e133a..f021cbd 100644
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -7,11 +7,14 @@
 extern void convert_null_to_semi(char *, int);
 extern int extract_param(const char *, const char *, unsigned int, char *,
 		unsigned char *);
-extern struct iscsi_login *iscsi_target_init_negotiation(
-		struct iscsi_np *, struct iscsi_conn *, char *);
+extern int iscsi_target_check_login_request(struct iscsi_conn *,
+		struct iscsi_login *);
+extern int iscsi_target_get_initial_payload(struct iscsi_conn *,
+		struct iscsi_login *);
+extern int iscsi_target_locate_portal(struct iscsi_np *, struct iscsi_conn *,
+		struct iscsi_login *);
 extern int iscsi_target_start_negotiation(
 		struct iscsi_login *, struct iscsi_conn *);
-extern void iscsi_target_nego_release(
-		struct iscsi_login *, struct iscsi_conn *);
+extern void iscsi_target_nego_release(struct iscsi_conn *);
 
 #endif /* ISCSI_TARGET_NEGO_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index ca2be40..f690be9 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -59,7 +59,7 @@
 	char *text_buf,
 	int text_length)
 {
-	int length, tx_sent;
+	int length, tx_sent, iov_cnt = 1;
 	struct kvec iov[2];
 
 	length = (ISCSI_HDR_LEN + text_length);
@@ -67,8 +67,12 @@
 	memset(&iov[0], 0, 2 * sizeof(struct kvec));
 	iov[0].iov_len		= ISCSI_HDR_LEN;
 	iov[0].iov_base		= pdu_buf;
-	iov[1].iov_len		= text_length;
-	iov[1].iov_base		= text_buf;
+
+	if (text_buf && text_length) {
+		iov[1].iov_len	= text_length;
+		iov[1].iov_base	= text_buf;
+		iov_cnt++;
+	}
 
 	/*
 	 * Initial Marker-less Interval.
@@ -77,7 +81,7 @@
 	 */
 	conn->if_marker += length;
 
-	tx_sent = tx_data(conn, &iov[0], 2, length);
+	tx_sent = tx_data(conn, &iov[0], iov_cnt, length);
 	if (tx_sent != length) {
 		pr_err("tx_data returned %d, expecting %d.\n",
 				tx_sent, length);
@@ -429,6 +433,28 @@
 			TYPERANGE_MARKINT, USE_INITIAL_ONLY);
 	if (!param)
 		goto out;
+	/*
+	 * Extra parameters for ISER from RFC-5046
+	 */
+	param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS,
+			PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
+			TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, INITIATORRECVDATASEGMENTLENGTH,
+			INITIAL_INITIATORRECVDATASEGMENTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_ALL);
+	if (!param)
+		goto out;
+
+	param = iscsi_set_default_param(pl, TARGETRECVDATASEGMENTLENGTH,
+			INITIAL_TARGETRECVDATASEGMENTLENGTH,
+			PHASE_OPERATIONAL, SCOPE_CONNECTION_ONLY, SENDER_BOTH,
+			TYPERANGE_512_TO_16777215, USE_ALL);
+	if (!param)
+		goto out;
 
 	*param_list_ptr = pl;
 	return 0;
@@ -438,19 +464,23 @@
 }
 
 int iscsi_set_keys_to_negotiate(
-	int sessiontype,
-	struct iscsi_param_list *param_list)
+	struct iscsi_param_list *param_list,
+	bool iser)
 {
 	struct iscsi_param *param;
 
+	param_list->iser = iser;
+
 	list_for_each_entry(param, &param_list->param_list, p_list) {
 		param->state = 0;
 		if (!strcmp(param->name, AUTHMETHOD)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, HEADERDIGEST)) {
-			SET_PSTATE_NEGOTIATE(param);
+			if (iser == false)
+				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, DATADIGEST)) {
-			SET_PSTATE_NEGOTIATE(param);
+			if (iser == false)
+				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, MAXCONNECTIONS)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, TARGETNAME)) {
@@ -469,7 +499,8 @@
 		} else if (!strcmp(param->name, IMMEDIATEDATA)) {
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) {
-			SET_PSTATE_NEGOTIATE(param);
+			if (iser == false)
+				SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
 			continue;
 		} else if (!strcmp(param->name, MAXBURSTLENGTH)) {
@@ -498,6 +529,15 @@
 			SET_PSTATE_NEGOTIATE(param);
 		} else if (!strcmp(param->name, OFMARKINT)) {
 			SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+			if (iser == true)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
+			if (iser == true)
+				SET_PSTATE_NEGOTIATE(param);
+		} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
+			if (iser == true)
+				SET_PSTATE_NEGOTIATE(param);
 		}
 	}
 
@@ -540,6 +580,12 @@
 			param->state &= ~PSTATE_NEGOTIATE;
 		else if (!strcmp(param->name, OFMARKINT))
 			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, RDMAEXTENTIONS))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
+		else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH))
+			param->state &= ~PSTATE_NEGOTIATE;
 	}
 
 	return 0;
@@ -1755,6 +1801,9 @@
 		 * this key is not sent over the wire.
 		 */
 		if (!strcmp(param->name, MAXXMITDATASEGMENTLENGTH)) {
+			if (param_list->iser == true)
+				continue;
+
 			ops->MaxXmitDataSegmentLength =
 				simple_strtoul(param->value, &tmpptr, 0);
 			pr_debug("MaxXmitDataSegmentLength:     %s\n",
@@ -1800,6 +1849,22 @@
 				simple_strtoul(param->value, &tmpptr, 0);
 			pr_debug("IFMarkInt:                    %s\n",
 				param->value);
+		} else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
+			ops->InitiatorRecvDataSegmentLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("InitiatorRecvDataSegmentLength: %s\n",
+				param->value);
+			ops->MaxRecvDataSegmentLength =
+					ops->InitiatorRecvDataSegmentLength;
+			pr_debug("Set MRDSL from InitiatorRecvDataSegmentLength\n");
+		} else if (!strcmp(param->name, TARGETRECVDATASEGMENTLENGTH)) {
+			ops->TargetRecvDataSegmentLength =
+				simple_strtoul(param->value, &tmpptr, 0);
+			pr_debug("TargetRecvDataSegmentLength:  %s\n",
+				param->value);
+			ops->MaxXmitDataSegmentLength =
+					ops->TargetRecvDataSegmentLength;
+			pr_debug("Set MXDSL from TargetRecvDataSegmentLength\n");
 		}
 	}
 	pr_debug("----------------------------------------------------"
@@ -1912,6 +1977,10 @@
 			ops->SessionType = !strcmp(param->value, DISCOVERY);
 			pr_debug("SessionType:                  %s\n",
 				param->value);
+		} else if (!strcmp(param->name, RDMAEXTENTIONS)) {
+			ops->RDMAExtensions = !strcmp(param->value, YES);
+			pr_debug("RDMAExtensions:               %s\n",
+				param->value);
 		}
 	}
 	pr_debug("----------------------------------------------------"
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index 1e1b750..f31b9c4 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -27,7 +27,7 @@
 extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
 extern void iscsi_print_params(struct iscsi_param_list *);
 extern int iscsi_create_default_params(struct iscsi_param_list **);
-extern int iscsi_set_keys_to_negotiate(int, struct iscsi_param_list *);
+extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
 extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
 extern int iscsi_copy_param_list(struct iscsi_param_list **,
 			struct iscsi_param_list *, int);
@@ -89,6 +89,13 @@
 #define X_EXTENSIONKEY_CISCO_OLD	"X-com.cisco.iscsi.draft"
 
 /*
+ * Parameter names of iSCSI Extensions for RDMA (iSER).  See RFC-5046
+ */
+#define RDMAEXTENTIONS			"RDMAExtensions"
+#define INITIATORRECVDATASEGMENTLENGTH	"InitiatorRecvDataSegmentLength"
+#define TARGETRECVDATASEGMENTLENGTH	"TargetRecvDataSegmentLength"
+
+/*
  * For AuthMethod.
  */
 #define KRB5				"KRB5"
@@ -133,6 +140,13 @@
 #define INITIAL_OFMARKINT			"2048~65535"
 
 /*
+ * Initial values for iSER parameters following RFC-5046 Section 6
+ */
+#define INITIAL_RDMAEXTENTIONS			NO
+#define INITIAL_INITIATORRECVDATASEGMENTLENGTH	"262144"
+#define INITIAL_TARGETRECVDATASEGMENTLENGTH	"8192"
+
+/*
  * For [Header,Data]Digests.
  */
 #define CRC32C				"CRC32C"
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 9d4417a..b997e5d 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -23,6 +23,7 @@
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
+#include <target/iscsi/iscsi_transport.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
@@ -301,7 +302,7 @@
 	/*
 	 * iscsit_build_r2ts_for_cmd() can handle the rest from here.
 	 */
-	return iscsit_build_r2ts_for_cmd(cmd, conn, true);
+	return conn->conn_transport->iscsit_get_dataout(conn, cmd, true);
 }
 
 static int iscsit_task_reassign_complete_read(
@@ -471,6 +472,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(iscsit_tmr_post_handler);
 
 /*
  *	Nothing to do here, but leave it for good measure. :-)
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index ee8f8c6..439260b 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -31,6 +31,8 @@
 #include "iscsi_target.h"
 #include "iscsi_target_parameters.h"
 
+#include <target/iscsi/iscsi_transport.h>
+
 struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u16 tpgt)
 {
 	struct iscsi_portal_group *tpg;
@@ -508,7 +510,7 @@
 
 	pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
 		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
-		(np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+		np->np_transport->name);
 
 	return tpg_np;
 }
@@ -522,7 +524,7 @@
 
 	pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
 		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
-		(np->np_network_transport == ISCSI_TCP) ? "TCP" : "SCTP");
+		np->np_transport->name);
 
 	tpg_np->tpg_np = NULL;
 	tpg_np->tpg = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c
new file mode 100644
index 0000000..882728f
--- /dev/null
+++ b/drivers/target/iscsi/iscsi_target_transport.c
@@ -0,0 +1,55 @@
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <target/iscsi/iscsi_transport.h>
+
+static LIST_HEAD(g_transport_list);
+static DEFINE_MUTEX(transport_mutex);
+
+struct iscsit_transport *iscsit_get_transport(int type)
+{
+	struct iscsit_transport *t;
+
+	mutex_lock(&transport_mutex);
+	list_for_each_entry(t, &g_transport_list, t_node) {
+		if (t->transport_type == type) {
+			if (t->owner && !try_module_get(t->owner)) {
+				t = NULL;
+			}
+			mutex_unlock(&transport_mutex);
+			return t;
+		}
+	}
+	mutex_unlock(&transport_mutex);
+
+	return NULL;
+}
+
+void iscsit_put_transport(struct iscsit_transport *t)
+{
+	if (t->owner)
+		module_put(t->owner);
+}
+
+int iscsit_register_transport(struct iscsit_transport *t)
+{
+	INIT_LIST_HEAD(&t->t_node);
+
+	mutex_lock(&transport_mutex);
+	list_add_tail(&t->t_node, &g_transport_list);
+	mutex_unlock(&transport_mutex);
+
+	pr_debug("Registered iSCSI transport: %s\n", t->name);
+
+	return 0;
+}
+EXPORT_SYMBOL(iscsit_register_transport);
+
+void iscsit_unregister_transport(struct iscsit_transport *t)
+{
+	mutex_lock(&transport_mutex);
+	list_del(&t->t_node);
+	mutex_unlock(&transport_mutex);
+
+	pr_debug("Unregistered iSCSI transport: %s\n", t->name);
+}
+EXPORT_SYMBOL(iscsit_unregister_transport);
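
[Editor's note] The new iscsi_target_transport.c is a small registry: register/unregister add and remove a transport from a mutex-protected list, and get/put wrap try_module_get()/module_put() on the owning module so a loaded transport (e.g. ib_isert) cannot vanish while a portal holds it.  A userspace analogue, with a plain refcount standing in for the module reference:

    #include <pthread.h>
    #include <stdio.h>

    struct transport {
            const char *name;
            int type;
            int refcount;
            struct transport *next;
    };

    static struct transport *transports;
    static pthread_mutex_t transport_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void register_transport(struct transport *t)
    {
            pthread_mutex_lock(&transport_mutex);
            t->next = transports;
            transports = t;
            pthread_mutex_unlock(&transport_mutex);
    }

    static struct transport *get_transport(int type)
    {
            struct transport *t;

            pthread_mutex_lock(&transport_mutex);
            for (t = transports; t; t = t->next) {
                    if (t->type == type) {
                            t->refcount++;      /* stand-in for try_module_get() */
                            break;
                    }
            }
            pthread_mutex_unlock(&transport_mutex);
            return t;
    }

    static void put_transport(struct transport *t)
    {
            pthread_mutex_lock(&transport_mutex);
            t->refcount--;                      /* stand-in for module_put() */
            pthread_mutex_unlock(&transport_mutex);
    }

    int main(void)
    {
            struct transport tcp = { .name = "iSCSI/TCP", .type = 0 };
            struct transport *t;

            register_transport(&tcp);
            t = get_transport(0);
            printf("got %s (refcount %d)\n", t->name, t->refcount);
            put_transport(t);
            return 0;
    }
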
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 7ce3505..2cc6c9a 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -24,6 +24,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
+#include <target/iscsi/iscsi_transport.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -148,6 +149,18 @@
 	spin_unlock_bh(&cmd->r2t_lock);
 }
 
+struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp_mask)
+{
+	struct iscsi_cmd *cmd;
+
+	cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
+	if (!cmd)
+		return NULL;
+
+	cmd->release_cmd = &iscsit_release_cmd;
+	return cmd;
+}
+
 /*
  * May be called from software interrupt (timer) context for allocating
  * iSCSI NopINs.
@@ -156,13 +169,12 @@
 {
 	struct iscsi_cmd *cmd;
 
-	cmd = kmem_cache_zalloc(lio_cmd_cache, gfp_mask);
+	cmd = conn->conn_transport->iscsit_alloc_cmd(conn, gfp_mask);
 	if (!cmd) {
 		pr_err("Unable to allocate memory for struct iscsi_cmd.\n");
 		return NULL;
 	}
-
-	cmd->conn	= conn;
+	cmd->conn = conn;
 	INIT_LIST_HEAD(&cmd->i_conn_node);
 	INIT_LIST_HEAD(&cmd->datain_list);
 	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
@@ -175,6 +187,7 @@
 
 	return cmd;
 }
+EXPORT_SYMBOL(iscsit_allocate_cmd);
 
 struct iscsi_seq *iscsit_get_seq_holder_for_datain(
 	struct iscsi_cmd *cmd,
@@ -304,6 +317,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL(iscsit_sequence_cmd);
 
 int iscsit_check_unsolicited_dataout(struct iscsi_cmd *cmd, unsigned char *buf)
 {
@@ -689,6 +703,11 @@
 	 */
 	switch (cmd->iscsi_opcode) {
 	case ISCSI_OP_SCSI_CMD:
+		if (cmd->data_direction == DMA_TO_DEVICE)
+			iscsit_stop_dataout_timer(cmd);
+		/*
+		 * Fallthrough
+		 */
 	case ISCSI_OP_SCSI_TMFUNC:
 		transport_generic_free_cmd(&cmd->se_cmd, 1);
 		break;
@@ -704,7 +723,7 @@
 		}
 		/* Fall-through */
 	default:
-		iscsit_release_cmd(cmd);
+		cmd->release_cmd(cmd);
 		break;
 	}
 }
@@ -1226,34 +1245,19 @@
  */
 int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_detail)
 {
-	u8 iscsi_hdr[ISCSI_HDR_LEN];
-	int err;
-	struct kvec iov;
 	struct iscsi_login_rsp *hdr;
+	struct iscsi_login *login = conn->conn_login;
 
+	login->login_failed = 1;
 	iscsit_collect_login_stats(conn, status_class, status_detail);
 
-	memset(&iov, 0, sizeof(struct kvec));
-	memset(&iscsi_hdr, 0x0, ISCSI_HDR_LEN);
-
-	hdr	= (struct iscsi_login_rsp *)&iscsi_hdr;
+	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];
 	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
 	hdr->status_class	= status_class;
 	hdr->status_detail	= status_detail;
 	hdr->itt		= conn->login_itt;
 
-	iov.iov_base		= &iscsi_hdr;
-	iov.iov_len		= ISCSI_HDR_LEN;
-
-	PRINT_BUFF(iscsi_hdr, ISCSI_HDR_LEN);
-
-	err = tx_data(conn, &iov, 1, ISCSI_HDR_LEN);
-	if (err != ISCSI_HDR_LEN) {
-		pr_err("tx_data returned less than expected\n");
-		return -1;
-	}
-
-	return 0;
+	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
 }
 
 void iscsit_print_session_params(struct iscsi_session *sess)
@@ -1432,7 +1436,8 @@
 		strcpy(ls->last_intr_fail_name,
 		       (intrname ? intrname->value : "Unknown"));
 
-		ls->last_intr_fail_ip_family = conn->sock->sk->sk_family;
+		ls->last_intr_fail_ip_family = conn->login_family;
+
 		snprintf(ls->last_intr_fail_ip_addr, IPV6_ADDRESS_SPACE,
 				"%s", conn->login_ip);
 		ls->last_fail_time = get_jiffies_64();
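
[Editor's note] iscsit_allocate_cmd() now allocates through the transport's iscsit_alloc_cmd() hook, and each command records a release_cmd function pointer, so whichever side allocated the descriptor (TCP core or an RDMA transport) also frees it.  A tiny sketch of that "allocate with a paired destructor" pattern; the structs are illustrative, not the kernel's.

    #include <stdio.h>
    #include <stdlib.h>

    struct cmd {
            int tag;
            void (*release_cmd)(struct cmd *);
    };

    static void tcp_release_cmd(struct cmd *c)
    {
            printf("releasing cmd %d via TCP path\n", c->tag);
            free(c);
    }

    static struct cmd *tcp_alloc_cmd(int tag)
    {
            struct cmd *c = calloc(1, sizeof(*c));

            if (!c)
                    return NULL;
            c->tag = tag;
            c->release_cmd = tcp_release_cmd;   /* paired destructor */
            return c;
    }

    int main(void)
    {
            struct cmd *c = tcp_alloc_cmd(7);

            if (!c)
                    return 1;
            c->release_cmd(c);                  /* generic teardown path */
            return 0;
    }
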
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 894d0f8..4f8e01a 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -8,6 +8,7 @@
 extern struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsi_cmd *);
 extern void iscsit_free_r2t(struct iscsi_r2t *, struct iscsi_cmd *);
 extern void iscsit_free_r2ts_from_list(struct iscsi_cmd *);
+extern struct iscsi_cmd *iscsit_alloc_cmd(struct iscsi_conn *, gfp_t);
 extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_datain(struct iscsi_cmd *, u32);
 extern struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsi_cmd *);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 17a6acb..58ed683 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -30,8 +30,10 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/falloc.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
+#include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
@@ -166,6 +168,33 @@
 			" block_device blocks: %llu logical_block_size: %d\n",
 			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
+		/*
+		 * Check if the underlying struct block_device request_queue supports
+		 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+		 * in ATA and we need to set TPE=1
+		 */
+		if (blk_queue_discard(q)) {
+			dev->dev_attrib.max_unmap_lba_count =
+				q->limits.max_discard_sectors;
+			/*
+			 * Currently hardcoded to 1 in Linux/SCSI code..
+			 */
+			dev->dev_attrib.max_unmap_block_desc_count = 1;
+			dev->dev_attrib.unmap_granularity =
+				q->limits.discard_granularity >> 9;
+			dev->dev_attrib.unmap_granularity_alignment =
+				q->limits.discard_alignment;
+			pr_debug("IFILE: BLOCK Discard support available,"
+					" disabled by default\n");
+		}
+		/*
+		 * Enable write same emulation and use 0xFFFF as
+		 * the smaller WRITE_SAME(10) only has a two-byte block count.
+		 */
+		dev->dev_attrib.max_write_same_len = 0xFFFF;
+
+		if (blk_queue_nonrot(q))
+			dev->dev_attrib.is_nonrot = 1;
 	} else {
 		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
 			pr_err("FILEIO: Missing fd_dev_size="
@@ -176,6 +205,23 @@
 
 		dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
 		dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+
+		/*
+		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
+		 */
+		dev->dev_attrib.max_unmap_lba_count = 0x2000;
+		/*
+		 * Currently hardcoded to 1 in Linux/SCSI code..
+		 */
+		dev->dev_attrib.max_unmap_block_desc_count = 1;
+		dev->dev_attrib.unmap_granularity = 1;
+		dev->dev_attrib.unmap_granularity_alignment = 0;
+
+		/*
+		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
+		 * based upon struct iovec limit for vfs_writev()
+		 */
+		dev->dev_attrib.max_write_same_len = 0x1000;
 	}
 
 	fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
@@ -190,11 +236,6 @@
 
 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
 	fd_dev->fd_queue_depth = dev->queue_depth;
-	/*
-	 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
-	 * based upon struct iovec limit for vfs_writev()
-	 */
-	dev->dev_attrib.max_write_same_len = 0x1000;
 
 	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
@@ -442,6 +483,75 @@
 }
 
 static sense_reason_t
+fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+{
+	struct file *file = priv;
+	struct inode *inode = file->f_mapping->host;
+	int ret;
+
+	if (S_ISBLK(inode->i_mode)) {
+		/* The backend is block device, use discard */
+		struct block_device *bdev = inode->i_bdev;
+
+		ret = blkdev_issue_discard(bdev, lba,
+				nolb, GFP_KERNEL, 0);
+		if (ret < 0) {
+			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
+				ret);
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+	} else {
+		/* The backend is normal file, use fallocate */
+		struct se_device *se_dev = cmd->se_dev;
+		loff_t pos = lba * se_dev->dev_attrib.block_size;
+		unsigned int len = nolb * se_dev->dev_attrib.block_size;
+		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+
+		if (!file->f_op->fallocate)
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+		ret = file->f_op->fallocate(file, mode, pos, len);
+		if (ret < 0) {
+			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
+			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+		}
+	}
+
+	return 0;
+}
+
+static sense_reason_t
+fd_execute_write_same_unmap(struct se_cmd *cmd)
+{
+	struct se_device *se_dev = cmd->se_dev;
+	struct fd_dev *fd_dev = FD_DEV(se_dev);
+	struct file *file = fd_dev->fd_file;
+	sector_t lba = cmd->t_task_lba;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	int ret;
+
+	if (!nolb) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
+	ret = fd_do_unmap(cmd, file, lba, nolb);
+	if (ret)
+		return ret;
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
+static sense_reason_t
+fd_execute_unmap(struct se_cmd *cmd)
+{
+	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
+
+	return sbc_execute_unmap(cmd, fd_do_unmap, file);
+}
+
+static sense_reason_t
 fd_execute_rw(struct se_cmd *cmd)
 {
 	struct scatterlist *sgl = cmd->t_data_sg;
@@ -600,6 +710,8 @@
 	.execute_rw		= fd_execute_rw,
 	.execute_sync_cache	= fd_execute_sync_cache,
 	.execute_write_same	= fd_execute_write_same,
+	.execute_write_same_unmap = fd_execute_write_same_unmap,
+	.execute_unmap		= fd_execute_unmap,
 };
 
 static sense_reason_t
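
[Editor's note] fd_do_unmap() above picks blkdev_issue_discard() when the FILEIO backend sits on a block device and fallocate(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE) for a regular file.  The same hole punch is available from userspace; a minimal demonstration (needs a filesystem with hole-punch support such as ext4 or xfs, and the file name is arbitrary):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const off_t block_size = 512;
            off_t lba = 2048, nolb = 4096;      /* arbitrary sample range */
            int fd = open("unmap_demo.img", O_RDWR | O_CREAT, 0644);

            if (fd < 0 || ftruncate(fd, 16 * 1024 * 1024) < 0) {
                    perror("setup");
                    return 1;
            }
            if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                          lba * block_size, nolb * block_size) < 0) {
                    perror("fallocate");
                    return 1;
            }
            printf("punched %lld bytes at offset %lld\n",
                   (long long)(nolb * block_size), (long long)(lba * block_size));
            close(fd);
            return 0;
    }
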
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8bcc514..07f5f94 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -380,104 +380,40 @@
 }
 
 static sense_reason_t
+iblock_do_unmap(struct se_cmd *cmd, void *priv,
+		sector_t lba, sector_t nolb)
+{
+	struct block_device *bdev = priv;
+	int ret;
+
+	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+	if (ret < 0) {
+		pr_err("blkdev_issue_discard() failed: %d\n", ret);
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+	}
+
+	return 0;
+}
+
+static sense_reason_t
 iblock_execute_unmap(struct se_cmd *cmd)
 {
-	struct se_device *dev = cmd->se_dev;
-	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
-	unsigned char *buf, *ptr = NULL;
-	sector_t lba;
-	int size;
-	u32 range;
-	sense_reason_t ret = 0;
-	int dl, bd_dl, err;
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
 
-	/* We never set ANC_SUP */
-	if (cmd->t_task_cdb[1])
-		return TCM_INVALID_CDB_FIELD;
-
-	if (cmd->data_length == 0) {
-		target_complete_cmd(cmd, SAM_STAT_GOOD);
-		return 0;
-	}
-
-	if (cmd->data_length < 8) {
-		pr_warn("UNMAP parameter list length %u too small\n",
-			cmd->data_length);
-		return TCM_PARAMETER_LIST_LENGTH_ERROR;
-	}
-
-	buf = transport_kmap_data_sg(cmd);
-	if (!buf)
-		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-	dl = get_unaligned_be16(&buf[0]);
-	bd_dl = get_unaligned_be16(&buf[2]);
-
-	size = cmd->data_length - 8;
-	if (bd_dl > size)
-		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
-			cmd->data_length, bd_dl);
-	else
-		size = bd_dl;
-
-	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
-		ret = TCM_INVALID_PARAMETER_LIST;
-		goto err;
-	}
-
-	/* First UNMAP block descriptor starts at 8 byte offset */
-	ptr = &buf[8];
-	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
-		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
-
-	while (size >= 16) {
-		lba = get_unaligned_be64(&ptr[0]);
-		range = get_unaligned_be32(&ptr[8]);
-		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
-				 (unsigned long long)lba, range);
-
-		if (range > dev->dev_attrib.max_unmap_lba_count) {
-			ret = TCM_INVALID_PARAMETER_LIST;
-			goto err;
-		}
-
-		if (lba + range > dev->transport->get_blocks(dev) + 1) {
-			ret = TCM_ADDRESS_OUT_OF_RANGE;
-			goto err;
-		}
-
-		err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
-					   GFP_KERNEL, 0);
-		if (err < 0) {
-			pr_err("blkdev_issue_discard() failed: %d\n",
-					err);
-			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			goto err;
-		}
-
-		ptr += 16;
-		size -= 16;
-	}
-
-err:
-	transport_kunmap_data_sg(cmd);
-	if (!ret)
-		target_complete_cmd(cmd, GOOD);
-	return ret;
+	return sbc_execute_unmap(cmd, iblock_do_unmap, bdev);
 }
 
 static sense_reason_t
 iblock_execute_write_same_unmap(struct se_cmd *cmd)
 {
-	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
-	int rc;
+	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+	sector_t lba = cmd->t_task_lba;
+	sector_t nolb = sbc_get_write_same_sectors(cmd);
+	int ret;
 
-	rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
-			sbc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
-	if (rc < 0) {
-		pr_warn("blkdev_issue_discard() failed: %d\n", rc);
-		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-	}
+	ret = iblock_do_unmap(cmd, bdev, lba, nolb);
+	if (ret)
+		return ret;
 
 	target_complete_cmd(cmd, GOOD);
 	return 0;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 60d4b51..bbc5b0e 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -596,3 +596,88 @@
 	return TYPE_DISK;
 }
 EXPORT_SYMBOL(sbc_get_device_type);
+
+sense_reason_t
+sbc_execute_unmap(struct se_cmd *cmd,
+	sense_reason_t (*do_unmap_fn)(struct se_cmd *, void *,
+				      sector_t, sector_t),
+	void *priv)
+{
+	struct se_device *dev = cmd->se_dev;
+	unsigned char *buf, *ptr = NULL;
+	sector_t lba;
+	int size;
+	u32 range;
+	sense_reason_t ret = 0;
+	int dl, bd_dl;
+
+	/* We never set ANC_SUP */
+	if (cmd->t_task_cdb[1])
+		return TCM_INVALID_CDB_FIELD;
+
+	if (cmd->data_length == 0) {
+		target_complete_cmd(cmd, SAM_STAT_GOOD);
+		return 0;
+	}
+
+	if (cmd->data_length < 8) {
+		pr_warn("UNMAP parameter list length %u too small\n",
+			cmd->data_length);
+		return TCM_PARAMETER_LIST_LENGTH_ERROR;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
+	dl = get_unaligned_be16(&buf[0]);
+	bd_dl = get_unaligned_be16(&buf[2]);
+
+	size = cmd->data_length - 8;
+	if (bd_dl > size)
+		pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
+			cmd->data_length, bd_dl);
+	else
+		size = bd_dl;
+
+	if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+		ret = TCM_INVALID_PARAMETER_LIST;
+		goto err;
+	}
+
+	/* First UNMAP block descriptor starts at 8 byte offset */
+	ptr = &buf[8];
+	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+	while (size >= 16) {
+		lba = get_unaligned_be64(&ptr[0]);
+		range = get_unaligned_be32(&ptr[8]);
+		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+				 (unsigned long long)lba, range);
+
+		if (range > dev->dev_attrib.max_unmap_lba_count) {
+			ret = TCM_INVALID_PARAMETER_LIST;
+			goto err;
+		}
+
+		if (lba + range > dev->transport->get_blocks(dev) + 1) {
+			ret = TCM_ADDRESS_OUT_OF_RANGE;
+			goto err;
+		}
+
+		ret = do_unmap_fn(cmd, priv, lba, range);
+		if (ret)
+			goto err;
+
+		ptr += 16;
+		size -= 16;
+	}
+
+err:
+	transport_kunmap_data_sg(cmd);
+	if (!ret)
+		target_complete_cmd(cmd, GOOD);
+	return ret;
+}
+EXPORT_SYMBOL(sbc_execute_unmap);
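
For reference, the new sbc_execute_unmap() helper parses the standard SBC
UNMAP parameter list: a 2-byte UNMAP DATA LENGTH, a 2-byte BLOCK DESCRIPTOR
DATA LENGTH, four reserved bytes, then 16-byte descriptors each holding a
big-endian 64-bit LBA and a 32-bit block count.  An initiator-side sketch
(illustrative only, plain C, not part of this patch) that builds a
single-descriptor list matching that parsing:

#include <stdint.h>
#include <string.h>

static void put_be16(uint8_t *p, uint16_t v)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v & 0xff;
}

static void put_be64(uint8_t *p, uint64_t v)
{
	put_be32(p, v >> 32);
	put_be32(p + 4, (uint32_t)v);
}

/* Returns the number of bytes written into buf (needs room for 24). */
static size_t build_unmap_param_list(uint8_t *buf, uint64_t lba, uint32_t nolb)
{
	memset(buf, 0, 24);
	put_be16(&buf[0], 22);		/* UNMAP DATA LENGTH (n - 1) */
	put_be16(&buf[2], 16);		/* BLOCK DESCRIPTOR DATA LENGTH */
	put_be64(&buf[8], lba);		/* descriptor 0: UNMAP LBA */
	put_be32(&buf[16], nolb);	/* descriptor 0: NUMBER OF LBAs */
	return 24;
}
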
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3243ea7..f8388b4 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -65,7 +65,6 @@
 static void transport_handle_queue_full(struct se_cmd *cmd,
 		struct se_device *dev);
 static int transport_generic_get_mem(struct se_cmd *cmd);
-static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -2179,7 +2178,7 @@
  * @se_cmd:	command descriptor to add
  * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
  */
-static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
 			       bool ack_kref)
 {
 	unsigned long flags;
@@ -2208,6 +2207,7 @@
 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(target_get_sess_cmd);
 
 static void target_release_cmd_kref(struct kref *kref)
 {
@@ -2765,8 +2765,13 @@
 		/* CURRENT ERROR */
 		buffer[0] = 0x70;
 		buffer[SPC_ADD_SENSE_LEN_OFFSET] = 10;
-		/* ILLEGAL REQUEST */
-		buffer[SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/*
+		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
+		 * Solaris initiators.  Returning NOT READY instead means the
+		 * operations will be retried a finite number of times and we
+		 * can survive intermittent errors.
+		 */
+		buffer[SPC_SENSE_KEY_OFFSET] = NOT_READY;
 		/* LOGICAL UNIT COMMUNICATION FAILURE */
 		buffer[SPC_ASC_KEY_OFFSET] = 0x08;
 		break;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index b6fd4cf..e415af3 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -103,6 +103,13 @@
 	use_sg = !(remaining % 4);
 
 	while (remaining) {
+		struct fc_seq *seq = cmd->seq;
+
+		if (!seq) {
+			pr_debug("%s: Command aborted, xid 0x%x\n",
+				 __func__, ep->xid);
+			break;
+		}
 		if (!mem_len) {
 			sg = sg_next(sg);
 			mem_len = min((size_t)sg->length, remaining);
@@ -169,7 +176,7 @@
 			f_ctl |= FC_FC_END_SEQ;
 		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
 			       FC_TYPE_FCP, f_ctl, fh_off);
-		error = lport->tt.seq_send(lport, cmd->seq, fp);
+		error = lport->tt.seq_send(lport, seq, fp);
 		if (error) {
 			/* XXX For now, initiator will retry */
 			pr_err_ratelimited("%s: Failed to send frame %p, "
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 113f335..4859505 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -428,19 +428,12 @@
 	return ret;
 }
 
-static void ft_sess_rcu_free(struct rcu_head *rcu)
-{
-	struct ft_sess *sess = container_of(rcu, struct ft_sess, rcu);
-
-	kfree(sess);
-}
-
 static void ft_sess_free(struct kref *kref)
 {
 	struct ft_sess *sess = container_of(kref, struct ft_sess, kref);
 
 	transport_deregister_session(sess->se_sess);
-	call_rcu(&sess->rcu, ft_sess_rcu_free);
+	kfree_rcu(sess, rcu);
 }
 
 void ft_sess_put(struct ft_sess *sess)
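
The tfc_sess.c hunk above is the standard kfree_rcu() conversion: when the
RCU callback does nothing but kfree() the object, the open-coded call_rcu()
wrapper can be dropped.  A generic sketch of the pattern (illustrative, not
tied to this driver):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_release(struct foo *f)
{
	/* Frees f after a grace period; replaces call_rcu() + kfree wrapper. */
	kfree_rcu(f, rcu);
}
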
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 957a0b9..1677238 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -66,11 +66,13 @@
  * TODO: debug and remove the workaround.
  */
 enum {
-	VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
+	VHOST_SCSI_FEATURES = (VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)) |
+			      (1ULL << VIRTIO_SCSI_F_HOTPLUG)
 };
 
 #define VHOST_SCSI_MAX_TARGET	256
 #define VHOST_SCSI_MAX_VQ	128
+#define VHOST_SCSI_MAX_EVENT	128
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
@@ -82,6 +84,12 @@
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
+
+	struct vhost_work vs_event_work; /* evt injection work item */
+	struct llist_head vs_event_list; /* evt injection queue */
+
+	bool vs_events_missed; /* any missed events, protected by vq->mutex */
+	int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
@@ -349,6 +357,37 @@
 	return 0;
 }
 
+static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+{
+	vs->vs_events_nr--;
+	kfree(evt);
+}
+
+static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+	u32 event, u32 reason)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct tcm_vhost_evt *evt;
+
+	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
+		vs->vs_events_missed = true;
+		return NULL;
+	}
+
+	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+	if (!evt) {
+		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+		vs->vs_events_missed = true;
+		return NULL;
+	}
+
+	evt->event.event = event;
+	evt->event.reason = reason;
+	vs->vs_events_nr++;
+
+	return evt;
+}
+
 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
@@ -367,6 +406,75 @@
 	kfree(tv_cmd);
 }
 
+static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
+	struct tcm_vhost_evt *evt)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct virtio_scsi_event *event = &evt->event;
+	struct virtio_scsi_event __user *eventp;
+	unsigned out, in;
+	int head, ret;
+
+	if (!vq->private_data) {
+		vs->vs_events_missed = true;
+		return;
+	}
+
+again:
+	vhost_disable_notify(&vs->dev, vq);
+	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+			ARRAY_SIZE(vq->iov), &out, &in,
+			NULL, NULL);
+	if (head < 0) {
+		vs->vs_events_missed = true;
+		return;
+	}
+	if (head == vq->num) {
+		if (vhost_enable_notify(&vs->dev, vq))
+			goto again;
+		vs->vs_events_missed = true;
+		return;
+	}
+
+	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
+		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
+				vq->iov[out].iov_len);
+		vs->vs_events_missed = true;
+		return;
+	}
+
+	if (vs->vs_events_missed) {
+		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+		vs->vs_events_missed = false;
+	}
+
+	eventp = vq->iov[out].iov_base;
+	ret = __copy_to_user(eventp, event, sizeof(*event));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+}
+
+static void tcm_vhost_evt_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					vs_event_work);
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct tcm_vhost_evt *evt;
+	struct llist_node *llnode;
+
+	mutex_lock(&vq->mutex);
+	llnode = llist_del_all(&vs->vs_event_list);
+	while (llnode) {
+		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+		llnode = llist_next(llnode);
+		tcm_vhost_do_evt_work(vs, evt);
+		tcm_vhost_free_evt(vs, evt);
+	}
+	mutex_unlock(&vq->mutex);
+}
+
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner
@@ -777,9 +885,46 @@
 	pr_debug("%s: The handling func for control queue.\n", __func__);
 }
 
+static void tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, u32 event, u32 reason)
+{
+	struct tcm_vhost_evt *evt;
+
+	evt = tcm_vhost_allocate_evt(vs, event, reason);
+	if (!evt)
+		return;
+
+	if (tpg && lun) {
+		/* TODO: share lun setup code with virtio-scsi.ko */
+		/*
+		 * Note: evt->event is zeroed when we allocate it and
+		 * lun[4-7] need to be zero according to virtio-scsi spec.
+		 */
+		evt->event.lun[0] = 0x01;
+		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+		if (lun->unpacked_lun >= 256)
+			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
+		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
+	}
+
+	llist_add(&evt->list, &vs->vs_event_list);
+	vhost_work_queue(&vs->dev, &vs->vs_event_work);
+}
+
 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
 {
-	pr_debug("%s: The handling func for event queue.\n", __func__);
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+	mutex_lock(&vq->mutex);
+	if (!vq->private_data)
+		goto out;
+
+	if (vs->vs_events_missed)
+		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+out:
+	mutex_unlock(&vq->mutex);
 }
 
 static void vhost_scsi_handle_kick(struct vhost_work *work)
@@ -803,11 +948,15 @@
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 		vhost_scsi_flush_vq(vs, i);
 	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+	vhost_work_flush(&vs->dev, &vs->vs_event_work);
 }
 
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ *
+ *  The lock nesting rule is:
+ *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int vhost_scsi_set_endpoint(
 	struct vhost_scsi *vs,
@@ -820,26 +969,27 @@
 	int index, ret, i, len;
 	bool match = false;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
+
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
 		/* Verify that ring has been setup correctly. */
 		if (!vhost_vq_access_ok(&vs->vqs[index])) {
-			mutex_unlock(&vs->dev.mutex);
-			return -EFAULT;
+			ret = -EFAULT;
+			goto out;
 		}
 	}
 
 	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
 	vs_tpg = kzalloc(len, GFP_KERNEL);
 	if (!vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	if (vs->vs_tpg)
 		memcpy(vs_tpg, vs->vs_tpg, len);
 
-	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
 		if (!tv_tpg->tpg_nexus) {
@@ -854,20 +1004,19 @@
 
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
 			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
-				mutex_unlock(&tv_tpg->tv_tpg_mutex);
-				mutex_unlock(&tcm_vhost_mutex);
-				mutex_unlock(&vs->dev.mutex);
 				kfree(vs_tpg);
-				return -EEXIST;
+				mutex_unlock(&tv_tpg->tv_tpg_mutex);
+				ret = -EEXIST;
+				goto out;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
+			tv_tpg->vhost_scsi = vs;
 			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
 		}
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
-	mutex_unlock(&tcm_vhost_mutex);
 
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
@@ -893,7 +1042,9 @@
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = vs_tpg;
 
+out:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
@@ -908,6 +1059,7 @@
 	int index, ret, i;
 	u8 target;
 
+	mutex_lock(&tcm_vhost_mutex);
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
 	for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -918,8 +1070,8 @@
 	}
 
 	if (!vs->vs_tpg) {
-		mutex_unlock(&vs->dev.mutex);
-		return 0;
+		ret = 0;
+		goto err_dev;
 	}
 
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -944,6 +1096,7 @@
 			goto err_tpg;
 		}
 		tv_tpg->tv_tpg_vhost_count--;
+		tv_tpg->vhost_scsi = NULL;
 		vs->vs_tpg[target] = NULL;
 		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
@@ -964,14 +1117,16 @@
 	vhost_scsi_flush(vs);
 	kfree(vs->vs_tpg);
 	vs->vs_tpg = NULL;
+	WARN_ON(vs->vs_events_nr);
 	mutex_unlock(&vs->dev.mutex);
-
+	mutex_unlock(&tcm_vhost_mutex);
 	return 0;
 
 err_tpg:
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 err_dev:
 	mutex_unlock(&vs->dev.mutex);
+	mutex_unlock(&tcm_vhost_mutex);
 	return ret;
 }
 
@@ -1003,6 +1158,10 @@
 		return -ENOMEM;
 
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
+
+	s->vs_events_nr = 0;
+	s->vs_events_missed = false;
 
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
@@ -1029,6 +1188,8 @@
 	vhost_scsi_clear_endpoint(s, &t);
 	vhost_dev_stop(&s->dev);
 	vhost_dev_cleanup(&s->dev, false);
+	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
+	vhost_scsi_flush(s);
 	kfree(s);
 	return 0;
 }
@@ -1040,8 +1201,11 @@
 	struct vhost_scsi_target backend;
 	void __user *argp = (void __user *)arg;
 	u64 __user *featurep = argp;
+	u32 __user *eventsp = argp;
+	u32 events_missed;
 	u64 features;
 	int r, abi_version = VHOST_SCSI_ABI_VERSION;
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
 
 	switch (ioctl) {
 	case VHOST_SCSI_SET_ENDPOINT:
@@ -1062,6 +1226,20 @@
 		if (copy_to_user(argp, &abi_version, sizeof abi_version))
 			return -EFAULT;
 		return 0;
+	case VHOST_SCSI_SET_EVENTS_MISSED:
+		if (get_user(events_missed, eventsp))
+			return -EFAULT;
+		mutex_lock(&vq->mutex);
+		vs->vs_events_missed = events_missed;
+		mutex_unlock(&vq->mutex);
+		return 0;
+	case VHOST_SCSI_GET_EVENTS_MISSED:
+		mutex_lock(&vq->mutex);
+		events_missed = vs->vs_events_missed;
+		mutex_unlock(&vq->mutex);
+		if (put_user(events_missed, eventsp))
+			return -EFAULT;
+		return 0;
 	case VHOST_GET_FEATURES:
 		features = VHOST_SCSI_FEATURES;
 		if (copy_to_user(featurep, &features, sizeof features))
@@ -1133,28 +1311,80 @@
 	return "Unknown";
 }
 
+static void tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, bool plug)
+{
+
+	struct vhost_scsi *vs = tpg->vhost_scsi;
+	struct vhost_virtqueue *vq;
+	u32 reason;
+
+	if (!vs)
+		return;
+
+	mutex_lock(&vs->dev.mutex);
+	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
+		mutex_unlock(&vs->dev.mutex);
+		return;
+	}
+
+	if (plug)
+		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
+	else
+		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
+
+	vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	mutex_lock(&vq->mutex);
+	tcm_vhost_send_evt(vs, tpg, lun,
+			VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
+	mutex_unlock(&vq->mutex);
+	mutex_unlock(&vs->dev.mutex);
+}
+
+static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	tcm_vhost_do_plug(tpg, lun, true);
+}
+
+static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	tcm_vhost_do_plug(tpg, lun, false);
+}
+
 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
 	struct se_lun *lun)
 {
 	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 				struct tcm_vhost_tpg, se_tpg);
 
+	mutex_lock(&tcm_vhost_mutex);
+
 	mutex_lock(&tv_tpg->tv_tpg_mutex);
 	tv_tpg->tv_tpg_port_count++;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 
+	tcm_vhost_hotplug(tv_tpg, lun);
+
+	mutex_unlock(&tcm_vhost_mutex);
+
 	return 0;
 }
 
 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
-	struct se_lun *se_lun)
+	struct se_lun *lun)
 {
 	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 				struct tcm_vhost_tpg, se_tpg);
 
+	mutex_lock(&tcm_vhost_mutex);
+
 	mutex_lock(&tv_tpg->tv_tpg_mutex);
 	tv_tpg->tv_tpg_port_count--;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+	tcm_vhost_hotunplug(tv_tpg, lun);
+
+	mutex_unlock(&tcm_vhost_mutex);
 }
 
 static struct se_node_acl *tcm_vhost_make_nodeacl(
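
tcm_vhost_send_evt() above encodes the event LUN in the 8-byte virtio-scsi
format: byte 0 fixed to 0x01, byte 1 the target (here the TPG tag), bytes 2-3
the LUN in flat addressing with bit 0x40 set in byte 2 for LUNs >= 256, and
the remaining bytes zero.  A stand-alone sketch of that encoding
(illustrative only):

#include <stdint.h>
#include <string.h>

static void virtio_scsi_encode_lun(uint8_t lun8[8], uint8_t target,
				   uint16_t unpacked_lun)
{
	memset(lun8, 0, 8);
	lun8[0] = 0x01;
	lun8[1] = target;
	if (unpacked_lun >= 256)
		lun8[2] = (unpacked_lun >> 8) | 0x40;
	lun8[3] = unpacked_lun & 0xff;
}
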
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 1d2ae7a..514b9fd 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -53,6 +53,7 @@
 	struct se_node_acl se_node_acl;
 };
 
+struct vhost_scsi;
 struct tcm_vhost_tpg {
 	/* Vhost port target portal group tag for TCM */
 	u16 tport_tpgt;
@@ -70,6 +71,8 @@
 	struct tcm_vhost_tport *tport;
 	/* Returned by tcm_vhost_make_tpg() */
 	struct se_portal_group se_tpg;
+	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
+	struct vhost_scsi *vhost_scsi;
 };
 
 struct tcm_vhost_tport {
@@ -83,6 +86,13 @@
 	struct se_wwn tport_wwn;
 };
 
+struct tcm_vhost_evt {
+	/* event to be sent to guest */
+	struct virtio_scsi_event event;
+	/* event list, serviced from vhost worker thread */
+	struct llist_node list;
+};
+
 /*
  * As per request from MST, keep TCM_VHOST related ioctl defines out of
  * linux/vhost.h (user-space) for now..
@@ -113,3 +123,6 @@
 #define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
 /* Changing this breaks userspace. */
 #define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int)
+/* Set and get the events missed flag */
+#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
+#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)
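
A minimal user-space sketch of driving the two new ioctls.  Assumptions: the
misc device node is /dev/vhost-scsi, and the ioctl numbers are redefined
locally because tcm_vhost.h is not a UAPI header (VHOST_VIRTIO is 0xAF, from
linux/vhost.h):

#include <fcntl.h>
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VHOST_VIRTIO 0xAF
#define VHOST_SCSI_SET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x43, __u32)
#define VHOST_SCSI_GET_EVENTS_MISSED _IOW(VHOST_VIRTIO, 0x44, __u32)

int main(void)
{
	__u32 missed = 0;
	int fd = open("/dev/vhost-scsi", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, VHOST_SCSI_GET_EVENTS_MISSED, &missed) < 0)
		perror("VHOST_SCSI_GET_EVENTS_MISSED");
	else
		printf("events missed: %u\n", missed);

	/* Clear the flag again, as a VMM would after reporting it. */
	missed = 0;
	if (ioctl(fd, VHOST_SCSI_SET_EVENTS_MISSED, &missed) < 0)
		perror("VHOST_SCSI_SET_EVENTS_MISSED");

	close(fd);
	return 0;
}
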
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index 01fd5c1..f704458 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -247,6 +247,7 @@
 	struct dlm_ls *ls;
 	struct plock_op *op;
 	int rv;
+	unsigned char fl_flags = fl->fl_flags;
 
 	ls = dlm_find_lockspace_local(lockspace);
 	if (!ls)
@@ -258,9 +259,18 @@
 		goto out;
 	}
 
-	if (posix_lock_file_wait(file, fl) < 0)
-		log_error(ls, "dlm_posix_unlock: vfs unlock error %llx",
-			  (unsigned long long)number);
+	/* Cause the VFS unlock to return -ENOENT if the lock is not found */
+	fl->fl_flags |= FL_EXISTS;
+
+	rv = posix_lock_file_wait(file, fl);
+	if (rv == -ENOENT) {
+		rv = 0;
+		goto out_free;
+	}
+	if (rv < 0) {
+		log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx",
+			  rv, (unsigned long long)number);
+	}
 
 	op->info.optype		= DLM_PLOCK_OP_UNLOCK;
 	op->info.pid		= fl->fl_pid;
@@ -296,9 +306,11 @@
 	if (rv == -ENOENT)
 		rv = 0;
 
+out_free:
 	kfree(op);
 out:
 	dlm_put_lockspace(ls);
+	fl->fl_flags = fl_flags;
 	return rv;
 }
 EXPORT_SYMBOL_GPL(dlm_posix_unlock);
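
The dlm_posix_unlock() change above hinges on FL_EXISTS: with that flag set,
posix_lock_file_wait() reports -ENOENT when no matching lock is found instead
of silently succeeding, and the saved fl_flags are restored before returning.
A kernel-internal sketch of the calling pattern (illustrative only, not a
drop-in helper):

#include <linux/fs.h>

static int vfs_unlock_report_missing(struct file *file, struct file_lock *fl)
{
	unsigned char saved_flags = fl->fl_flags;
	int rv;

	/* Ask the VFS to tell us when there was nothing to unlock. */
	fl->fl_flags |= FL_EXISTS;
	rv = posix_lock_file_wait(file, fl);
	fl->fl_flags = saved_flags;

	/* A missing lock is not an error for an unlock request. */
	return rv == -ENOENT ? 0 : rv;
}
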
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 24f414f..9883694 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1055,7 +1055,7 @@
 		if (atomic_read(&bh->b_count))
 			goto cannot_release;
 		bd = bh->b_private;
-		if (bd && bd->bd_ail)
+		if (bd && bd->bd_tr)
 			goto cannot_release;
 		if (buffer_pinned(bh) || buffer_dirty(bh))
 			goto not_possible;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 5e83657..1dc9a13 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -787,7 +787,7 @@
 		goto out_rlist;
 
 	if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */
-		gfs2_rs_deltree(ip, ip->i_res);
+		gfs2_rs_deltree(ip->i_res);
 
 	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
 				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index bd82235..9435384 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -912,7 +912,7 @@
  */
 
 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
-			    unsigned long delay)
+			    unsigned long delay, bool remote)
 {
 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
 
@@ -925,8 +925,8 @@
 		gl->gl_demote_state = LM_ST_UNLOCKED;
 	}
 	if (gl->gl_ops->go_callback)
-		gl->gl_ops->go_callback(gl);
-	trace_gfs2_demote_rq(gl);
+		gl->gl_ops->go_callback(gl, remote);
+	trace_gfs2_demote_rq(gl, remote);
 }
 
 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
@@ -1091,7 +1091,7 @@
 
 	spin_lock(&gl->gl_spin);
 	if (gh->gh_flags & GL_NOCACHE)
-		handle_callback(gl, LM_ST_UNLOCKED, 0);
+		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 
 	list_del_init(&gh->gh_list);
 	if (find_first_holder(gl) == NULL) {
@@ -1279,19 +1279,6 @@
 		gfs2_glock_dq(&ghs[num_gh]);
 }
 
-/**
- * gfs2_glock_dq_uninit_m - release multiple glocks
- * @num_gh: the number of structures
- * @ghs: an array of struct gfs2_holder structures
- *
- */
-
-void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
-{
-	while (num_gh--)
-		gfs2_glock_dq_uninit(&ghs[num_gh]);
-}
-
 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 {
 	unsigned long delay = 0;
@@ -1309,7 +1296,7 @@
 	}
 
 	spin_lock(&gl->gl_spin);
-	handle_callback(gl, state, delay);
+	handle_callback(gl, state, delay, true);
 	spin_unlock(&gl->gl_spin);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
@@ -1422,7 +1409,7 @@
 		spin_unlock(&lru_lock);
 		spin_lock(&gl->gl_spin);
 		if (demote_ok(gl))
-			handle_callback(gl, LM_ST_UNLOCKED, 0);
+			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
 		smp_mb__after_clear_bit();
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -1547,7 +1534,7 @@
 
 	spin_lock(&gl->gl_spin);
 	if (gl->gl_state != LM_ST_UNLOCKED)
-		handle_callback(gl, LM_ST_UNLOCKED, 0);
+		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 	spin_unlock(&gl->gl_spin);
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
@@ -1590,6 +1577,7 @@
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 {
 	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
+	flush_workqueue(glock_workqueue);
 	glock_hash_walk(clear_glock, sdp);
 	flush_workqueue(glock_workqueue);
 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index fd580b7..69f66e3 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -201,7 +201,6 @@
 			     struct gfs2_holder *gh);
 extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
 extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
 extern int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
 extern __printf(2, 3)
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 444b650..c66e99c 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -515,12 +515,12 @@
  *
  * gl_spin lock is held while calling this
  */
-static void iopen_go_callback(struct gfs2_glock *gl)
+static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
 	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 
-	if (sdp->sd_vfs->s_flags & MS_RDONLY)
+	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
 		return;
 
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 5c29216..26aabd7 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -31,7 +31,6 @@
 struct gfs2_glock;
 struct gfs2_quota_data;
 struct gfs2_trans;
-struct gfs2_ail;
 struct gfs2_jdesc;
 struct gfs2_sbd;
 struct lm_lockops;
@@ -53,7 +52,7 @@
 
 struct gfs2_log_operations {
 	void (*lo_before_commit) (struct gfs2_sbd *sdp);
-	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
+	void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_trans *tr);
 	void (*lo_before_scan) (struct gfs2_jdesc *jd,
 				struct gfs2_log_header_host *head, int pass);
 	int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
@@ -139,7 +138,7 @@
 	struct list_head bd_list;
 	const struct gfs2_log_operations *bd_ops;
 
-	struct gfs2_ail *bd_ail;
+	struct gfs2_trans *bd_tr;
 	struct list_head bd_ail_st_list;
 	struct list_head bd_ail_gl_list;
 };
@@ -211,7 +210,7 @@
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
 	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
-	void (*go_callback) (struct gfs2_glock *gl);
+	void (*go_callback)(struct gfs2_glock *gl, bool remote);
 	const int go_type;
 	const unsigned long go_flags;
 #define GLOF_ASPACE 1
@@ -433,6 +432,7 @@
 	struct gfs2_holder tr_t_gh;
 
 	int tr_touched;
+	int tr_attached;
 
 	unsigned int tr_num_buf_new;
 	unsigned int tr_num_databuf_new;
@@ -440,14 +440,12 @@
 	unsigned int tr_num_databuf_rm;
 	unsigned int tr_num_revoke;
 	unsigned int tr_num_revoke_rm;
-};
 
-struct gfs2_ail {
-	struct list_head ai_list;
+	struct list_head tr_list;
 
-	unsigned int ai_first;
-	struct list_head ai_ail1_list;
-	struct list_head ai_ail2_list;
+	unsigned int tr_first;
+	struct list_head tr_ail1_list;
+	struct list_head tr_ail2_list;
 };
 
 struct gfs2_journal_extent {
@@ -710,6 +708,7 @@
 
 	spinlock_t sd_log_lock;
 
+	struct gfs2_trans *sd_log_tr;
 	unsigned int sd_log_blks_reserved;
 	unsigned int sd_log_commited_buf;
 	unsigned int sd_log_commited_databuf;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index cc00bd1..8833a4f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -392,11 +392,15 @@
 	int error;
 	int dblocks = 1;
 
-	error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
+	error = gfs2_quota_lock_check(ip);
 	if (error)
 		goto out;
 
-	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0);
+	error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
+	if (error)
+		goto out_quota;
+
+	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 0);
 	if (error)
 		goto out_ipreserv;
 
@@ -409,6 +413,8 @@
 
 out_ipreserv:
 	gfs2_inplace_release(ip);
+out_quota:
+	gfs2_quota_unlock(ip);
 out:
 	return error;
 }
@@ -440,59 +446,27 @@
  */
 
 static void init_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
-			const char *symname, struct buffer_head **bhp)
+			const char *symname)
 {
-	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
 	struct gfs2_dinode *di;
 	struct buffer_head *dibh;
-	struct timespec tv = CURRENT_TIME;
 
 	dibh = gfs2_meta_new(ip->i_gl, ip->i_no_addr);
 	gfs2_trans_add_meta(ip->i_gl, dibh);
-	gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI);
-	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 	di = (struct gfs2_dinode *)dibh->b_data;
+	gfs2_dinode_out(ip, di);
 
-	di->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
-	di->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
-	di->di_mode = cpu_to_be32(ip->i_inode.i_mode);
-	di->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
-	di->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
-	di->di_nlink = 0;
-	di->di_size = cpu_to_be64(ip->i_inode.i_size);
-	di->di_blocks = cpu_to_be64(1);
-	di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec);
 	di->di_major = cpu_to_be32(MAJOR(ip->i_inode.i_rdev));
 	di->di_minor = cpu_to_be32(MINOR(ip->i_inode.i_rdev));
-	di->di_goal_meta = di->di_goal_data = cpu_to_be64(ip->i_no_addr);
-	di->di_generation = cpu_to_be64(ip->i_generation);
-	di->di_flags = 0;
 	di->__pad1 = 0;
-	di->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) ? GFS2_FORMAT_DE : 0);
-	di->di_height = 0;
 	di->__pad2 = 0;
 	di->__pad3 = 0;
-	di->di_depth = 0;
-	di->di_entries = 0;
 	memset(&di->__pad4, 0, sizeof(di->__pad4));
-	di->di_eattr = 0;
-	di->di_atime_nsec = cpu_to_be32(tv.tv_nsec);
-	di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec);
-	di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec);
 	memset(&di->di_reserved, 0, sizeof(di->di_reserved));
+	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 
 	switch(ip->i_inode.i_mode & S_IFMT) {
-	case S_IFREG:
-		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
-		    gfs2_tune_get(sdp, gt_new_files_jdata))
-			di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
-		break;
 	case S_IFDIR:
-		di->di_flags |= cpu_to_be32(dip->i_diskflags &
-					    GFS2_DIF_INHERIT_JDATA);
-		di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA);
-		di->di_size = cpu_to_be64(sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode));
-		di->di_entries = cpu_to_be32(2);
 		gfs2_init_dir(dibh, dip);
 		break;
 	case S_IFLNK:
@@ -501,63 +475,17 @@
 	}
 
 	set_buffer_uptodate(dibh);
-
-	*bhp = dibh;
-}
-
-static int make_dinode(struct gfs2_inode *dip, struct gfs2_inode *ip,
-		       const char *symname, struct buffer_head **bhp)
-{
-	struct inode *inode = &ip->i_inode;
-	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-	int error;
-
-	error = gfs2_rindex_update(sdp);
-	if (error)
-		return error;
-
-	error = gfs2_quota_lock(dip, inode->i_uid, inode->i_gid);
-	if (error)
-		return error;
-
-	error = gfs2_quota_check(dip, inode->i_uid, inode->i_gid);
-	if (error)
-		goto out_quota;
-
-	error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0);
-	if (error)
-		goto out_quota;
-
-	init_dinode(dip, ip, symname, bhp);
-	gfs2_quota_change(dip, +1, inode->i_uid, inode->i_gid);
-	gfs2_trans_end(sdp);
-
-out_quota:
-	gfs2_quota_unlock(dip);
-	return error;
+	brelse(dibh);
 }
 
 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
-		       struct gfs2_inode *ip)
+		       struct gfs2_inode *ip, int arq)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-	int alloc_required;
-	struct buffer_head *dibh;
 	int error;
 
-	error = gfs2_rindex_update(sdp);
-	if (error)
-		return error;
-
-	error = gfs2_quota_lock(dip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
-	if (error)
-		goto fail;
-
-	error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name);
-	if (alloc_required < 0)
-		goto fail_quota_locks;
-	if (alloc_required) {
-		error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid);
+	if (arq) {
+		error = gfs2_quota_lock_check(dip);
 		if (error)
 			goto fail_quota_locks;
 
@@ -581,26 +509,12 @@
 	if (error)
 		goto fail_end_trans;
 
-	error = gfs2_meta_inode_buffer(ip, &dibh);
-	if (error)
-		goto fail_end_trans;
-	set_nlink(&ip->i_inode, S_ISDIR(ip->i_inode.i_mode) ? 2 : 1);
-	gfs2_trans_add_meta(ip->i_gl, dibh);
-	gfs2_dinode_out(ip, dibh->b_data);
-	brelse(dibh);
-	return 0;
-
 fail_end_trans:
 	gfs2_trans_end(sdp);
-
 fail_ipreserv:
-	if (alloc_required)
-		gfs2_inplace_release(dip);
-
+	gfs2_inplace_release(dip);
 fail_quota_locks:
 	gfs2_quota_unlock(dip);
-
-fail:
 	return error;
 }
 
@@ -650,8 +564,8 @@
 	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
 	struct gfs2_glock *io_gl;
 	int error;
-	struct buffer_head *bh = NULL;
 	u32 aflags = 0;
+	int arq;
 
 	if (!name->len || name->len > GFS2_FNAMESIZE)
 		return -ENAMETOOLONG;
@@ -660,6 +574,10 @@
 	if (error)
 		return error;
 
+	error = gfs2_rindex_update(sdp);
+	if (error)
+		return error;
+
 	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
 	if (error)
 		goto fail;
@@ -674,22 +592,48 @@
 	if (error)
 		goto fail_gunlock;
 
+	arq = error = gfs2_diradd_alloc_required(dir, name);
+	if (error < 0)
+		goto fail_gunlock;
+
 	inode = new_inode(sdp->sd_vfs);
-	if (!inode) {
-		gfs2_glock_dq_uninit(ghs);
-		return -ENOMEM;
-	}
+	error = -ENOMEM;
+	if (!inode)
+		goto fail_gunlock;
+
 	ip = GFS2_I(inode);
 	error = gfs2_rs_alloc(ip);
 	if (error)
 		goto fail_free_inode;
 
-	set_bit(GIF_INVALID, &ip->i_flags);
 	inode->i_mode = mode;
+	set_nlink(inode, S_ISDIR(mode) ? 2 : 1);
 	inode->i_rdev = dev;
 	inode->i_size = size;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	gfs2_set_inode_blocks(inode, 1);
 	munge_mode_uid_gid(dip, inode);
 	ip->i_goal = dip->i_goal;
+	ip->i_diskflags = 0;
+	ip->i_eattr = 0;
+	ip->i_height = 0;
+	ip->i_depth = 0;
+	ip->i_entries = 0;
+
+	switch(mode & S_IFMT) {
+	case S_IFREG:
+		if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) ||
+		    gfs2_tune_get(sdp, gt_new_files_jdata))
+			ip->i_diskflags |= GFS2_DIF_JDATA;
+		gfs2_set_aops(inode);
+		break;
+	case S_IFDIR:
+		ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA);
+		ip->i_diskflags |= GFS2_DIF_JDATA;
+		ip->i_entries = 2;
+		break;
+	}
+	gfs2_set_inode_flags(inode);
 
 	if ((GFS2_I(sdp->sd_root_dir->d_inode) == dip) ||
 	    (dip->i_diskflags & GFS2_DIF_TOPDIR))
@@ -708,10 +652,13 @@
 	if (error)
 		goto fail_free_inode;
 
-	error = make_dinode(dip, ip, symname, &bh);
+	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 	if (error)
 		goto fail_gunlock2;
 
+	init_dinode(dip, ip, symname);
+	gfs2_trans_end(sdp);
+
 	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
 	if (error)
 		goto fail_gunlock2;
@@ -725,10 +672,6 @@
 	gfs2_set_iop(inode);
 	insert_inode_hash(inode);
 
-	error = gfs2_inode_refresh(ip);
-	if (error)
-		goto fail_gunlock3;
-
 	error = gfs2_acl_create(dip, inode);
 	if (error)
 		goto fail_gunlock3;
@@ -737,18 +680,13 @@
 	if (error)
 		goto fail_gunlock3;
 
-	error = link_dinode(dip, name, ip);
+	error = link_dinode(dip, name, ip, arq);
 	if (error)
 		goto fail_gunlock3;
 
-	if (bh)
-		brelse(bh);
-
-	gfs2_trans_end(sdp);
-	gfs2_inplace_release(dip);
-	gfs2_quota_unlock(dip);
 	mark_inode_dirty(inode);
-	gfs2_glock_dq_uninit_m(2, ghs);
+	gfs2_glock_dq_uninit(ghs);
+	gfs2_glock_dq_uninit(ghs + 1);
 	d_instantiate(dentry, inode);
 	return 0;
 
@@ -769,12 +707,12 @@
 fail_gunlock:
 	gfs2_glock_dq_uninit(ghs);
 	if (inode && !IS_ERR(inode)) {
+		clear_nlink(inode);
+		mark_inode_dirty(inode);
 		set_bit(GIF_ALLOC_FAILED, &GFS2_I(inode)->i_flags);
 		iput(inode);
 	}
 fail:
-	if (bh)
-		brelse(bh);
 	return error;
 }
 
@@ -1151,7 +1089,9 @@
 
 static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
-	return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, 0, 0);
+	struct gfs2_sbd *sdp = GFS2_SB(dir);
+	unsigned dsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode);
+	return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, dsize, 0);
 }
 
 /**
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 9a2ca8b..b404f48 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -73,7 +73,7 @@
 
 void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
 {
-	bd->bd_ail = NULL;
+	bd->bd_tr = NULL;
 	list_del_init(&bd->bd_ail_st_list);
 	list_del_init(&bd->bd_ail_gl_list);
 	atomic_dec(&bd->bd_gl->gl_ail_count);
@@ -90,7 +90,7 @@
 
 static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
 			       struct writeback_control *wbc,
-			       struct gfs2_ail *ai)
+			       struct gfs2_trans *tr)
 __releases(&sdp->sd_ail_lock)
 __acquires(&sdp->sd_ail_lock)
 {
@@ -99,15 +99,15 @@
 	struct gfs2_bufdata *bd, *s;
 	struct buffer_head *bh;
 
-	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list, bd_ail_st_list) {
+	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
 		bh = bd->bd_bh;
 
-		gfs2_assert(sdp, bd->bd_ail == ai);
+		gfs2_assert(sdp, bd->bd_tr == tr);
 
 		if (!buffer_busy(bh)) {
 			if (!buffer_uptodate(bh))
 				gfs2_io_error_bh(sdp, bh);
-			list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
+			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
 			continue;
 		}
 
@@ -116,7 +116,7 @@
 		if (gl == bd->bd_gl)
 			continue;
 		gl = bd->bd_gl;
-		list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
+		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
 		mapping = bh->b_page->mapping;
 		if (!mapping)
 			continue;
@@ -144,15 +144,15 @@
 void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
 {
 	struct list_head *head = &sdp->sd_ail1_list;
-	struct gfs2_ail *ai;
+	struct gfs2_trans *tr;
 
 	trace_gfs2_ail_flush(sdp, wbc, 1);
 	spin_lock(&sdp->sd_ail_lock);
 restart:
-	list_for_each_entry_reverse(ai, head, ai_list) {
+	list_for_each_entry_reverse(tr, head, tr_list) {
 		if (wbc->nr_to_write <= 0)
 			break;
-		if (gfs2_ail1_start_one(sdp, wbc, ai))
+		if (gfs2_ail1_start_one(sdp, wbc, tr))
 			goto restart;
 	}
 	spin_unlock(&sdp->sd_ail_lock);
@@ -183,20 +183,20 @@
  *
  */
 
-static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	struct gfs2_bufdata *bd, *s;
 	struct buffer_head *bh;
 
-	list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
+	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
 					 bd_ail_st_list) {
 		bh = bd->bd_bh;
-		gfs2_assert(sdp, bd->bd_ail == ai);
+		gfs2_assert(sdp, bd->bd_tr == tr);
 		if (buffer_busy(bh))
 			continue;
 		if (!buffer_uptodate(bh))
 			gfs2_io_error_bh(sdp, bh);
-		list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
+		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
 	}
 
 }
@@ -210,14 +210,14 @@
 
 static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
 {
-	struct gfs2_ail *ai, *s;
+	struct gfs2_trans *tr, *s;
 	int ret;
 
 	spin_lock(&sdp->sd_ail_lock);
-	list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
-		gfs2_ail1_empty_one(sdp, ai);
-		if (list_empty(&ai->ai_ail1_list))
-			list_move(&ai->ai_list, &sdp->sd_ail2_list);
+	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
+		gfs2_ail1_empty_one(sdp, tr);
+		if (list_empty(&tr->tr_ail1_list))
+			list_move(&tr->tr_list, &sdp->sd_ail2_list);
 		else
 			break;
 	}
@@ -229,13 +229,13 @@
 
 static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
 {
-	struct gfs2_ail *ai;
+	struct gfs2_trans *tr;
 	struct gfs2_bufdata *bd;
 	struct buffer_head *bh;
 
 	spin_lock(&sdp->sd_ail_lock);
-	list_for_each_entry_reverse(ai, &sdp->sd_ail1_list, ai_list) {
-		list_for_each_entry(bd, &ai->ai_ail1_list, bd_ail_st_list) {
+	list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
+		list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
 			bh = bd->bd_bh;
 			if (!buffer_locked(bh))
 				continue;
@@ -256,40 +256,40 @@
  *
  */
 
-static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
-	struct list_head *head = &ai->ai_ail2_list;
+	struct list_head *head = &tr->tr_ail2_list;
 	struct gfs2_bufdata *bd;
 
 	while (!list_empty(head)) {
 		bd = list_entry(head->prev, struct gfs2_bufdata,
 				bd_ail_st_list);
-		gfs2_assert(sdp, bd->bd_ail == ai);
+		gfs2_assert(sdp, bd->bd_tr == tr);
 		gfs2_remove_from_ail(bd);
 	}
 }
 
 static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
 {
-	struct gfs2_ail *ai, *safe;
+	struct gfs2_trans *tr, *safe;
 	unsigned int old_tail = sdp->sd_log_tail;
 	int wrap = (new_tail < old_tail);
 	int a, b, rm;
 
 	spin_lock(&sdp->sd_ail_lock);
 
-	list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
-		a = (old_tail <= ai->ai_first);
-		b = (ai->ai_first < new_tail);
+	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
+		a = (old_tail <= tr->tr_first);
+		b = (tr->tr_first < new_tail);
 		rm = (wrap) ? (a || b) : (a && b);
 		if (!rm)
 			continue;
 
-		gfs2_ail2_empty_one(sdp, ai);
-		list_del(&ai->ai_list);
-		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
-		gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
-		kfree(ai);
+		gfs2_ail2_empty_one(sdp, tr);
+		list_del(&tr->tr_list);
+		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
+		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
+		kfree(tr);
 	}
 
 	spin_unlock(&sdp->sd_ail_lock);
@@ -435,7 +435,7 @@
 
 static unsigned int current_tail(struct gfs2_sbd *sdp)
 {
-	struct gfs2_ail *ai;
+	struct gfs2_trans *tr;
 	unsigned int tail;
 
 	spin_lock(&sdp->sd_ail_lock);
@@ -443,8 +443,9 @@
 	if (list_empty(&sdp->sd_ail1_list)) {
 		tail = sdp->sd_log_head;
 	} else {
-		ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
-		tail = ai->ai_first;
+		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
+				tr_list);
+		tail = tr->tr_first;
 	}
 
 	spin_unlock(&sdp->sd_ail_lock);
@@ -600,7 +601,7 @@
 
 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
 {
-	struct gfs2_ail *ai;
+	struct gfs2_trans *tr;
 
 	down_write(&sdp->sd_log_flush_lock);
 
@@ -611,9 +612,12 @@
 	}
 	trace_gfs2_log_flush(sdp, 1);
 
-	ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
-	INIT_LIST_HEAD(&ai->ai_ail1_list);
-	INIT_LIST_HEAD(&ai->ai_ail2_list);
+	tr = sdp->sd_log_tr;
+	if (tr) {
+		sdp->sd_log_tr = NULL;
+		INIT_LIST_HEAD(&tr->tr_ail1_list);
+		INIT_LIST_HEAD(&tr->tr_ail2_list);
+	}
 
 	if (sdp->sd_log_num_buf != sdp->sd_log_commited_buf) {
 		printk(KERN_INFO "GFS2: log buf %u %u\n", sdp->sd_log_num_buf,
@@ -630,7 +634,8 @@
 
 	sdp->sd_log_flush_head = sdp->sd_log_head;
 	sdp->sd_log_flush_wrapped = 0;
-	ai->ai_first = sdp->sd_log_flush_head;
+	if (tr)
+		tr->tr_first = sdp->sd_log_flush_head;
 
 	gfs2_ordered_write(sdp);
 	lops_before_commit(sdp);
@@ -643,7 +648,7 @@
 		trace_gfs2_log_blocks(sdp, -1);
 		log_write_header(sdp, 0);
 	}
-	lops_after_commit(sdp, ai);
+	lops_after_commit(sdp, tr);
 
 	gfs2_log_lock(sdp);
 	sdp->sd_log_head = sdp->sd_log_flush_head;
@@ -653,16 +658,16 @@
 	sdp->sd_log_commited_revoke = 0;
 
 	spin_lock(&sdp->sd_ail_lock);
-	if (!list_empty(&ai->ai_ail1_list)) {
-		list_add(&ai->ai_list, &sdp->sd_ail1_list);
-		ai = NULL;
+	if (tr && !list_empty(&tr->tr_ail1_list)) {
+		list_add(&tr->tr_list, &sdp->sd_ail1_list);
+		tr = NULL;
 	}
 	spin_unlock(&sdp->sd_ail_lock);
 	gfs2_log_unlock(sdp);
 	trace_gfs2_log_flush(sdp, 0);
 	up_write(&sdp->sd_log_flush_lock);
 
-	kfree(ai);
+	kfree(tr);
 }
 
 static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
@@ -687,6 +692,12 @@
 			     sdp->sd_jdesc->jd_blocks);
 	sdp->sd_log_blks_reserved = reserved;
 
+	if (sdp->sd_log_tr == NULL &&
+	    (tr->tr_num_buf_new || tr->tr_num_databuf_new)) {
+		gfs2_assert_withdraw(sdp, tr->tr_t_gh.gh_gl);
+		sdp->sd_log_tr = tr;
+		tr->tr_attached = 1;
+	}
 	gfs2_log_unlock(sdp);
 }
 
@@ -708,7 +719,6 @@
 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	log_refund(sdp, tr);
-	up_read(&sdp->sd_log_flush_lock);
 
 	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
 	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index a505597..7318abf 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -53,8 +53,8 @@
 	 * to in-place disk block, remove it from the AIL.
 	 */
 	spin_lock(&sdp->sd_ail_lock);
-	if (bd->bd_ail)
-		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
+	if (bd->bd_tr)
+		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
 	spin_unlock(&sdp->sd_ail_lock);
 	get_bh(bh);
 	atomic_inc(&sdp->sd_log_pinned);
@@ -94,7 +94,7 @@
  */
 
 static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
-		       struct gfs2_ail *ai)
+		       struct gfs2_trans *tr)
 {
 	struct gfs2_bufdata *bd = bh->b_private;
 
@@ -109,7 +109,7 @@
 		maybe_release_space(bd);
 
 	spin_lock(&sdp->sd_ail_lock);
-	if (bd->bd_ail) {
+	if (bd->bd_tr) {
 		list_del(&bd->bd_ail_st_list);
 		brelse(bh);
 	} else {
@@ -117,8 +117,8 @@
 		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
 		atomic_inc(&gl->gl_ail_count);
 	}
-	bd->bd_ail = ai;
-	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
+	bd->bd_tr = tr;
+	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
 	spin_unlock(&sdp->sd_ail_lock);
 
 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
@@ -480,17 +480,22 @@
 			   &sdp->sd_log_le_buf, 0);
 }
 
-static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	struct list_head *head = &sdp->sd_log_le_buf;
 	struct gfs2_bufdata *bd;
 
+	if (tr == NULL) {
+		gfs2_assert(sdp, list_empty(head));
+		return;
+	}
+
 	while (!list_empty(head)) {
 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
 		list_del_init(&bd->bd_list);
 		sdp->sd_log_num_buf--;
 
-		gfs2_unpin(sdp, bd->bd_bh, ai);
+		gfs2_unpin(sdp, bd->bd_bh, tr);
 	}
 	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
 }
@@ -613,7 +618,7 @@
 	gfs2_log_write_page(sdp, page);
 }
 
-static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	struct list_head *head = &sdp->sd_log_le_revoke;
 	struct gfs2_bufdata *bd;
@@ -791,16 +796,21 @@
 		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
 }
 
-static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 {
 	struct list_head *head = &sdp->sd_log_le_databuf;
 	struct gfs2_bufdata *bd;
 
+	if (tr == NULL) {
+		gfs2_assert(sdp, list_empty(head));
+		return;
+	}
+
 	while (!list_empty(head)) {
 		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
 		list_del_init(&bd->bd_list);
 		sdp->sd_log_num_databuf--;
-		gfs2_unpin(sdp, bd->bd_bh, ai);
+		gfs2_unpin(sdp, bd->bd_bh, tr);
 	}
 	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
 }
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index ba77b7d..87e062e 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -55,12 +55,13 @@
 			gfs2_log_ops[x]->lo_before_commit(sdp);
 }
 
-static inline void lops_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+static inline void lops_after_commit(struct gfs2_sbd *sdp,
+				     struct gfs2_trans *tr)
 {
 	int x;
 	for (x = 0; gfs2_log_ops[x]; x++)
 		if (gfs2_log_ops[x]->lo_after_commit)
-			gfs2_log_ops[x]->lo_after_commit(sdp, ai);
+			gfs2_log_ops[x]->lo_after_commit(sdp, tr);
 }
 
 static inline void lops_before_scan(struct gfs2_jdesc *jd,
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index b059bbb..1a89afb 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -295,7 +295,7 @@
 	}
 	if (bd) {
 		spin_lock(&sdp->sd_ail_lock);
-		if (bd->bd_ail) {
+		if (bd->bd_tr) {
 			gfs2_remove_from_ail(bd);
 			bh->b_private = NULL;
 			bd->bd_bh = NULL;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 5a51265..0c5a575 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -592,7 +592,7 @@
  * @rs: The reservation to remove
  *
  */
-static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
+static void __rs_deltree(struct gfs2_blkreserv *rs)
 {
 	struct gfs2_rgrpd *rgd;
 
@@ -605,7 +605,7 @@
 	RB_CLEAR_NODE(&rs->rs_node);
 
 	if (rs->rs_free) {
-		/* return reserved blocks to the rgrp and the ip */
+		/* return reserved blocks to the rgrp */
 		BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
 		rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
 		rs->rs_free = 0;
@@ -619,14 +619,14 @@
  * @rs: The reservation to remove
  *
  */
-void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
+void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 {
 	struct gfs2_rgrpd *rgd;
 
 	rgd = rs->rs_rbm.rgd;
 	if (rgd) {
 		spin_lock(&rgd->rd_rsspin);
-		__rs_deltree(ip, rs);
+		__rs_deltree(rs);
 		spin_unlock(&rgd->rd_rsspin);
 	}
 }
@@ -640,7 +640,7 @@
 {
 	down_write(&ip->i_rw_mutex);
 	if (ip->i_res) {
-		gfs2_rs_deltree(ip, ip->i_res);
+		gfs2_rs_deltree(ip->i_res);
 		BUG_ON(ip->i_res->rs_free);
 		kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
 		ip->i_res = NULL;
@@ -664,7 +664,7 @@
 	spin_lock(&rgd->rd_rsspin);
 	while ((n = rb_first(&rgd->rd_rstree))) {
 		rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
-		__rs_deltree(NULL, rs);
+		__rs_deltree(rs);
 	}
 	spin_unlock(&rgd->rd_rsspin);
 }
@@ -1874,7 +1874,7 @@
 
 		/* Drop reservation, if we couldn't use reserved rgrp */
 		if (gfs2_rs_active(rs))
-			gfs2_rs_deltree(ip, rs);
+			gfs2_rs_deltree(rs);
 check_rgrp:
 		/* Check for unlinked inodes which can be reclaimed */
 		if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK)
@@ -2087,7 +2087,7 @@
 			if (rs->rs_free && !ret)
 				goto out;
 		}
-		__rs_deltree(ip, rs);
+		__rs_deltree(rs);
 	}
 out:
 	spin_unlock(&rgd->rd_rsspin);
@@ -2180,13 +2180,7 @@
 	if (dinode)
 		gfs2_trans_add_unrevoke(sdp, block, 1);
 
-	/*
-	 * This needs reviewing to see why we cannot do the quota change
-	 * at this point in the dinode case.
-	 */
-	if (ndata)
-		gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
-				  ip->i_inode.i_gid);
+	gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid);
 
 	rbm.rgd->rd_free_clone -= *nblocks;
 	trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index 8421858..5b3f4a8 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -47,7 +47,7 @@
 			     bool dinode, u64 *generation);
 
 extern int gfs2_rs_alloc(struct gfs2_inode *ip);
-extern void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs);
+extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
 extern void gfs2_rs_delete(struct gfs2_inode *ip);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index cab77b8..917c8e1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1512,7 +1512,7 @@
 out_unlock:
 	/* Error path for case 1 */
 	if (gfs2_rs_active(ip->i_res))
-		gfs2_rs_deltree(ip, ip->i_res);
+		gfs2_rs_deltree(ip->i_res);
 
 	if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
 		gfs2_glock_dq(&ip->i_iopen_gh);
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 2ee13e8..20c007d 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -159,9 +159,9 @@
 /* Callback (local or remote) requesting lock demotion */
 TRACE_EVENT(gfs2_demote_rq,
 
-	TP_PROTO(const struct gfs2_glock *gl),
+	TP_PROTO(const struct gfs2_glock *gl, bool remote),
 
-	TP_ARGS(gl),
+	TP_ARGS(gl, remote),
 
 	TP_STRUCT__entry(
 		__field(        dev_t,  dev                     )
@@ -170,6 +170,7 @@
 		__field(	u8,	cur_state		)
 		__field(	u8,	dmt_state		)
 		__field(	unsigned long,	flags		)
+		__field(	bool,	remote			)
 	),
 
 	TP_fast_assign(
@@ -179,14 +180,16 @@
 		__entry->cur_state	= glock_trace_state(gl->gl_state);
 		__entry->dmt_state	= glock_trace_state(gl->gl_demote_state);
 		__entry->flags		= gl->gl_flags  | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
+		__entry->remote		= remote;
 	),
 
-	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s",
+	TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s %s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
 		  (unsigned long long)__entry->glnum,
                   glock_trace_name(__entry->cur_state),
                   glock_trace_name(__entry->dmt_state),
-		  show_glock_flags(__entry->flags))
+		  show_glock_flags(__entry->flags),
+		  __entry->remote ? "remote" : "local")
 
 );
 
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 5bc023e..7374907 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -136,8 +136,10 @@
 	if (tr->tr_t_gh.gh_gl) {
 		gfs2_glock_dq(&tr->tr_t_gh);
 		gfs2_holder_uninit(&tr->tr_t_gh);
-		kfree(tr);
+		if (!tr->tr_attached)
+			kfree(tr);
 	}
+	up_read(&sdp->sd_log_flush_lock);
 
 	if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
 		gfs2_log_flush(sdp, NULL);
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 0796c45..01bfe766 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -144,6 +144,9 @@
 			timeout);
 	if (ret < 0)
 		return -ERESTARTSYS;
+	/* Reset the lock status after a server reboot so we resend */
+	if (block->b_status == nlm_lck_denied_grace_period)
+		block->b_status = nlm_lck_blocked;
 	req->a_res.status = block->b_status;
 	return 0;
 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 7e529c3..9760ecb 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -550,9 +550,6 @@
 		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
 		if (status < 0)
 			break;
-		/* Resend the blocking lock request after a server reboot */
-		if (resp->status ==  nlm_lck_denied_grace_period)
-			continue;
 		if (resp->status != nlm_lck_blocked)
 			break;
 	}
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 5088b57..cff089a 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -125,6 +125,9 @@
 	set_freezable();
 
 	while (!kthread_should_stop()) {
+		if (try_to_freeze())
+			continue;
+
 		prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
 		spin_lock_bh(&serv->sv_cb_lock);
 		if (!list_empty(&serv->sv_cb_list)) {
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2960512..a13d26e 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -500,7 +500,7 @@
 		     &args->craa_type_mask))
 		pnfs_recall_all_layouts(cps->clp);
 	if (flags)
-		nfs_expire_all_delegation_types(cps->clp, flags);
+		nfs_expire_unused_delegation_types(cps->clp, flags);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 84d8eae..c513b0c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -593,6 +593,8 @@
 		args.flags |= RPC_CLNT_CREATE_DISCRTRY;
 	if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
 		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+	if (test_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags))
+		args.flags |= RPC_CLNT_CREATE_INFINITE_SLOTS;
 
 	if (!IS_ERR(clp->cl_rpcclient))
 		return 0;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 6390a4b..57db324 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -64,17 +64,15 @@
 	return ret;
 }
 
-static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
+static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
 {
 	struct inode *inode = state->inode;
 	struct file_lock *fl;
 	int status = 0;
 
 	if (inode->i_flock == NULL)
-		return 0;
-
-	if (inode->i_flock == NULL)
 		goto out;
+
 	/* Protect inode->i_flock using the file locks lock */
 	lock_flocks();
 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
@@ -83,7 +81,7 @@
 		if (nfs_file_open_context(fl->fl_file) != ctx)
 			continue;
 		unlock_flocks();
-		status = nfs4_lock_delegation_recall(state, fl);
+		status = nfs4_lock_delegation_recall(fl, state, stateid);
 		if (status < 0)
 			goto out;
 		lock_flocks();
@@ -120,7 +118,7 @@
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
 		err = nfs4_open_delegation_recall(ctx, state, stateid);
 		if (!err)
-			err = nfs_delegation_claim_locks(ctx, state);
+			err = nfs_delegation_claim_locks(ctx, state, stateid);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
 			err = -EAGAIN;
 		mutex_unlock(&sp->so_delegreturn_mutex);
@@ -389,6 +387,24 @@
 	return err;
 }
 
+static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
+{
+	bool ret = false;
+
+	if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
+		ret = true;
+	if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
+		struct inode *inode;
+
+		spin_lock(&delegation->lock);
+		inode = delegation->inode;
+		if (inode && list_empty(&NFS_I(inode)->open_files))
+			ret = true;
+		spin_unlock(&delegation->lock);
+	}
+	return ret;
+}
+
 /**
  * nfs_client_return_marked_delegations - return previously marked delegations
  * @clp: nfs_client to process
@@ -411,8 +427,7 @@
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		list_for_each_entry_rcu(delegation, &server->delegations,
 								super_list) {
-			if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
-							&delegation->flags))
+			if (!nfs_delegation_need_return(delegation))
 				continue;
 			inode = nfs_delegation_grab_inode(delegation);
 			if (inode == NULL)
@@ -471,6 +486,13 @@
 	return err;
 }
 
+static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+		struct nfs_delegation *delegation)
+{
+	set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+}
+
 static void nfs_mark_return_delegation(struct nfs_server *server,
 		struct nfs_delegation *delegation)
 {
@@ -478,6 +500,45 @@
 	set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
 }
 
+static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
+{
+	struct nfs_delegation *delegation;
+	bool ret = false;
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+		nfs_mark_return_delegation(server, delegation);
+		ret = true;
+	}
+	return ret;
+}
+
+static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_server_mark_return_all_delegations(server);
+	rcu_read_unlock();
+}
+
+static void nfs_delegation_run_state_manager(struct nfs_client *clp)
+{
+	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
+		nfs4_schedule_state_manager(clp);
+}
+
+/**
+ * nfs_expire_all_delegations
+ * @clp: client to process
+ *
+ */
+void nfs_expire_all_delegations(struct nfs_client *clp)
+{
+	nfs_client_mark_return_all_delegations(clp);
+	nfs_delegation_run_state_manager(clp);
+}
+
 /**
  * nfs_super_return_all_delegations - return delegations for one superblock
  * @sb: sb to process
@@ -486,24 +547,22 @@
 void nfs_server_return_all_delegations(struct nfs_server *server)
 {
 	struct nfs_client *clp = server->nfs_client;
-	struct nfs_delegation *delegation;
+	bool need_wait;
 
 	if (clp == NULL)
 		return;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
-		spin_lock(&delegation->lock);
-		set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
-		spin_unlock(&delegation->lock);
-	}
+	need_wait = nfs_server_mark_return_all_delegations(server);
 	rcu_read_unlock();
 
-	if (nfs_client_return_marked_delegations(clp) != 0)
+	if (need_wait) {
 		nfs4_schedule_state_manager(clp);
+		nfs4_wait_clnt_recover(clp);
+	}
 }
 
-static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
+static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
 						 fmode_t flags)
 {
 	struct nfs_delegation *delegation;
@@ -512,27 +571,21 @@
 		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
 			continue;
 		if (delegation->type & flags)
-			nfs_mark_return_delegation(server, delegation);
+			nfs_mark_return_if_closed_delegation(server, delegation);
 	}
 }
 
-static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
+static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
 							fmode_t flags)
 {
 	struct nfs_server *server;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
-		nfs_mark_return_all_delegation_types(server, flags);
+		nfs_mark_return_unused_delegation_types(server, flags);
 	rcu_read_unlock();
 }
 
-static void nfs_delegation_run_state_manager(struct nfs_client *clp)
-{
-	if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
-		nfs4_schedule_state_manager(clp);
-}
-
 void nfs_remove_bad_delegation(struct inode *inode)
 {
 	struct nfs_delegation *delegation;
@@ -546,27 +599,17 @@
 EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
 
 /**
- * nfs_expire_all_delegation_types
+ * nfs_expire_unused_delegation_types
  * @clp: client to process
  * @flags: delegation types to expire
  *
  */
-void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
+void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
 {
-	nfs_client_mark_return_all_delegation_types(clp, flags);
+	nfs_client_mark_return_unused_delegation_types(clp, flags);
 	nfs_delegation_run_state_manager(clp);
 }
 
-/**
- * nfs_expire_all_delegations
- * @clp: client to process
- *
- */
-void nfs_expire_all_delegations(struct nfs_client *clp)
-{
-	nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
-}
-
 static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
 {
 	struct nfs_delegation *delegation;
@@ -574,7 +617,7 @@
 	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
 			continue;
-		nfs_mark_return_delegation(server, delegation);
+		nfs_mark_return_if_closed_delegation(server, delegation);
 	}
 }
 
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index d54d4fc..9a79c7a 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -28,6 +28,7 @@
 enum {
 	NFS_DELEGATION_NEED_RECLAIM = 0,
 	NFS_DELEGATION_RETURN,
+	NFS_DELEGATION_RETURN_IF_CLOSED,
 	NFS_DELEGATION_REFERENCED,
 	NFS_DELEGATION_RETURNING,
 };
@@ -41,7 +42,7 @@
 struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
 void nfs_server_return_all_delegations(struct nfs_server *);
 void nfs_expire_all_delegations(struct nfs_client *clp);
-void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
+void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags);
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
 int nfs_client_return_marked_delegations(struct nfs_client *clp);
 int nfs_delegations_present(struct nfs_client *clp);
@@ -53,7 +54,7 @@
 /* NFSv4 delegation-related procedures */
 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
-int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
+int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
 bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
 
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index f23f455..e093e73 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1486,6 +1486,8 @@
 		goto no_open;
 	if (d_mountpoint(dentry))
 		goto no_open;
+	if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
+		goto no_open;
 
 	inode = dentry->d_inode;
 	parent = dget_parent(dentry);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 29f4a48..a87a44f 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -744,6 +744,7 @@
 do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
 {
 	struct inode *inode = filp->f_mapping->host;
+	struct nfs_lock_context *l_ctx;
 	int status;
 
 	/*
@@ -752,6 +753,14 @@
 	 */
 	nfs_sync_mapping(filp->f_mapping);
 
+	l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
+	if (!IS_ERR(l_ctx)) {
+		status = nfs_iocounter_wait(&l_ctx->io_count);
+		nfs_put_lock_context(l_ctx);
+		if (status < 0)
+			return status;
+	}
+
 	/* NOTE: special case
 	 * 	If we're signalled while cleaning up locks on process exit, we
 	 * 	still need to complete the unlock.
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1f94167..c1c7a9d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -561,20 +561,22 @@
 	l_ctx->lockowner.l_owner = current->files;
 	l_ctx->lockowner.l_pid = current->tgid;
 	INIT_LIST_HEAD(&l_ctx->list);
+	nfs_iocounter_init(&l_ctx->io_count);
 }
 
 static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
 {
-	struct nfs_lock_context *pos;
+	struct nfs_lock_context *head = &ctx->lock_context;
+	struct nfs_lock_context *pos = head;
 
-	list_for_each_entry(pos, &ctx->lock_context.list, list) {
+	do {
 		if (pos->lockowner.l_owner != current->files)
 			continue;
 		if (pos->lockowner.l_pid != current->tgid)
 			continue;
 		atomic_inc(&pos->count);
 		return pos;
-	}
+	} while ((pos = list_entry(pos->list.next, typeof(*pos), list)) != head);
 	return NULL;
 }
 
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 541c9eb..91e59a3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -229,6 +229,13 @@
 			      struct nfs_pgio_header *hdr,
 			      void (*release)(struct nfs_pgio_header *hdr));
 void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
+int nfs_iocounter_wait(struct nfs_io_counter *c);
+
+static inline void nfs_iocounter_init(struct nfs_io_counter *c)
+{
+	c->flags = 0;
+	atomic_set(&c->io_count, 0);
+}
 
 /* nfs2xdr.c */
 extern struct rpc_procinfo nfs_procedures[];
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 944c9a5..553a83c 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -36,6 +36,7 @@
 
 struct nfs4_minor_version_ops {
 	u32	minor_version;
+	unsigned init_caps;
 
 	int	(*call_sync)(struct rpc_clnt *clnt,
 			struct nfs_server *server,
@@ -143,12 +144,14 @@
 enum {
 	LK_STATE_IN_USE,
 	NFS_DELEGATED_STATE,		/* Current stateid is delegation */
+	NFS_OPEN_STATE,			/* OPEN stateid is set */
 	NFS_O_RDONLY_STATE,		/* OPEN stateid has read-only state */
 	NFS_O_WRONLY_STATE,		/* OPEN stateid has write-only state */
 	NFS_O_RDWR_STATE,		/* OPEN stateid has read/write state */
 	NFS_STATE_RECLAIM_REBOOT,	/* OPEN stateid server rebooted */
 	NFS_STATE_RECLAIM_NOGRACE,	/* OPEN stateid needs to recover state */
 	NFS_STATE_POSIX_LOCKS,		/* Posix locks are supported */
+	NFS_STATE_RECOVERY_FAILED,	/* OPEN stateid state recovery failed */
 };
 
 struct nfs4_state {
@@ -233,6 +236,10 @@
 extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
 extern int nfs4_release_lockowner(struct nfs4_lock_state *);
 extern const struct xattr_handler *nfs4_xattr_handlers[];
+extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
+		const struct nfs_open_context *ctx,
+		const struct nfs_lock_context *l_ctx,
+		fmode_t fmode);
 
 #if defined(CONFIG_NFS_V4_1)
 static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
@@ -347,13 +354,13 @@
 extern int nfs4_client_recover_expired_lease(struct nfs_client *clp);
 extern void nfs4_schedule_state_manager(struct nfs_client *);
 extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
-extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
+extern int nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
 extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
 extern void nfs41_handle_server_scope(struct nfs_client *,
 				      struct nfs41_server_scope **);
 extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
 extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
+extern int nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
 		fmode_t, const struct nfs_lockowner *);
 
 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
@@ -412,6 +419,11 @@
 	return memcmp(dst, src, sizeof(*dst)) == 0;
 }
 
+static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state)
+{
+	return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
+}
+
 #else
 
 #define nfs4_close_state(a, b) do { } while (0)
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 66b6664..947b0c9 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -198,8 +198,12 @@
 	/* Check NFS protocol revision and initialize RPC op vector */
 	clp->rpc_ops = &nfs_v4_clientops;
 
+	if (clp->cl_minorversion != 0)
+		__set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
 	__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
-	error = nfs_create_rpc_client(clp, timeparms, authflavour);
+	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
+	if (error == -EINVAL)
+		error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_NULL);
 	if (error < 0)
 		goto error;
 
@@ -730,6 +734,19 @@
 	if (error < 0)
 		goto out;
 
+	/* Set the basic capabilities */
+	server->caps |= server->nfs_client->cl_mvops->init_caps;
+	if (server->flags & NFS_MOUNT_NORDIRPLUS)
+			server->caps &= ~NFS_CAP_READDIRPLUS;
+	/*
+	 * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
+	 * authentication.
+	 */
+	if (nfs4_disable_idmapping &&
+			server->client->cl_auth->au_flavor == RPC_AUTH_UNIX)
+		server->caps |= NFS_CAP_UIDGID_NOMAP;
+
+
 	/* Probe the root fh to retrieve its FSID and filehandle */
 	error = nfs4_get_rootfh(server, mntfh);
 	if (error < 0)
@@ -773,9 +790,6 @@
 
 	/* Initialise the client representation from the mount data */
 	server->flags = data->flags;
-	server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|NFS_CAP_POSIX_LOCK;
-	if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
-			server->caps |= NFS_CAP_READDIRPLUS;
 	server->options = data->options;
 
 	/* Get a client record */
@@ -792,13 +806,6 @@
 	if (error < 0)
 		goto error;
 
-	/*
-	 * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
-	 * authentication.
-	 */
-	if (nfs4_disable_idmapping && data->auth_flavors[0] == RPC_AUTH_UNIX)
-		server->caps |= NFS_CAP_UIDGID_NOMAP;
-
 	if (data->rsize)
 		server->rsize = nfs_block_size(data->rsize, NULL);
 	if (data->wsize)
@@ -876,7 +883,6 @@
 
 	/* Initialise the client representation from the parent server */
 	nfs_server_copy_userdata(server, parent_server);
-	server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
 
 	/* Get a client representation.
 	 * Note: NFSv4 always uses TCP, */
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 4fb234d..22d1062 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -158,11 +158,14 @@
 	case -NFS4ERR_OPENMODE:
 		if (state == NULL)
 			break;
-		nfs4_schedule_stateid_recovery(mds_server, state);
+		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
+			goto out_bad_stateid;
 		goto wait_on_recovery;
 	case -NFS4ERR_EXPIRED:
-		if (state != NULL)
-			nfs4_schedule_stateid_recovery(mds_server, state);
+		if (state != NULL) {
+			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
+				goto out_bad_stateid;
+		}
 		nfs4_schedule_lease_recovery(mds_client);
 		goto wait_on_recovery;
 	/* DS session errors */
@@ -226,6 +229,9 @@
 out:
 	task->tk_status = 0;
 	return -EAGAIN;
+out_bad_stateid:
+	task->tk_status = -EIO;
+	return 0;
 wait_on_recovery:
 	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
 	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
@@ -299,6 +305,10 @@
 {
 	struct nfs_read_data *rdata = data;
 
+	if (unlikely(test_bit(NFS_CONTEXT_BAD, &rdata->args.context->flags))) {
+		rpc_exit(task, -EIO);
+		return;
+	}
 	if (filelayout_reset_to_mds(rdata->header->lseg)) {
 		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
 		filelayout_reset_read(rdata);
@@ -307,10 +317,13 @@
 	}
 	rdata->read_done_cb = filelayout_read_done_cb;
 
-	nfs41_setup_sequence(rdata->ds_clp->cl_session,
+	if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
 			&rdata->args.seq_args,
 			&rdata->res.seq_res,
-			task);
+			task))
+		return;
+	nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
+			rdata->args.lock_context, FMODE_READ);
 }
 
 static void filelayout_read_call_done(struct rpc_task *task, void *data)
@@ -401,16 +414,23 @@
 {
 	struct nfs_write_data *wdata = data;
 
+	if (unlikely(test_bit(NFS_CONTEXT_BAD, &wdata->args.context->flags))) {
+		rpc_exit(task, -EIO);
+		return;
+	}
 	if (filelayout_reset_to_mds(wdata->header->lseg)) {
 		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
 		filelayout_reset_write(wdata);
 		rpc_exit(task, 0);
 		return;
 	}
-	nfs41_setup_sequence(wdata->ds_clp->cl_session,
+	if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
 			&wdata->args.seq_args,
 			&wdata->res.seq_res,
-			task);
+			task))
+		return;
+	nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
+			wdata->args.lock_context, FMODE_WRITE);
 }
 
 static void filelayout_write_call_done(struct rpc_task *task, void *data)
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 0dd7660..cdb0b41 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -134,33 +134,38 @@
 	return ret;
 }
 
+/**
+ * nfs_find_best_sec - Find a security mechanism supported locally
+ * @flavors: List of security tuples returned by SECINFO procedure
+ *
+ * Return the pseudoflavor of the first security mechanism in
+ * "flavors" that is locally supported.  Return RPC_AUTH_UNIX if
+ * no matching flavor is found in the array.  The "flavors" array
+ * is searched in the order returned from the server, per RFC 3530
+ * recommendation.
+ */
 rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
 {
-	struct gss_api_mech *mech;
-	struct xdr_netobj oid;
-	int i;
-	rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+	rpc_authflavor_t pseudoflavor;
+	struct nfs4_secinfo4 *secinfo;
+	unsigned int i;
 
 	for (i = 0; i < flavors->num_flavors; i++) {
-		struct nfs4_secinfo_flavor *flavor;
-		flavor = &flavors->flavors[i];
+		secinfo = &flavors->flavors[i];
 
-		if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
-			pseudoflavor = flavor->flavor;
-			break;
-		} else if (flavor->flavor == RPC_AUTH_GSS) {
-			oid.len  = flavor->gss.sec_oid4.len;
-			oid.data = flavor->gss.sec_oid4.data;
-			mech = gss_mech_get_by_OID(&oid);
-			if (!mech)
-				continue;
-			pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
-			gss_mech_put(mech);
+		switch (secinfo->flavor) {
+		case RPC_AUTH_NULL:
+		case RPC_AUTH_UNIX:
+		case RPC_AUTH_GSS:
+			pseudoflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
+							&secinfo->flavor_info);
+			if (pseudoflavor != RPC_AUTH_MAXFLAVOR)
+				return pseudoflavor;
 			break;
 		}
 	}
 
-	return pseudoflavor;
+	return RPC_AUTH_UNIX;
 }
 
 static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0ad025e..9da4bd5 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -107,6 +107,8 @@
 		return -EPROTONOSUPPORT;
 	case -NFS4ERR_ACCESS:
 		return -EACCES;
+	case -NFS4ERR_FILE_OPEN:
+		return -EBUSY;
 	default:
 		dprintk("%s could not handle NFSv4 error %d\n",
 				__func__, -err);
@@ -295,19 +297,30 @@
 			}
 			if (state == NULL)
 				break;
-			nfs4_schedule_stateid_recovery(server, state);
+			ret = nfs4_schedule_stateid_recovery(server, state);
+			if (ret < 0)
+				break;
 			goto wait_on_recovery;
 		case -NFS4ERR_DELEG_REVOKED:
 		case -NFS4ERR_ADMIN_REVOKED:
 		case -NFS4ERR_BAD_STATEID:
+			if (inode != NULL && nfs4_have_delegation(inode, FMODE_READ)) {
+				nfs_remove_bad_delegation(inode);
+				exception->retry = 1;
+				break;
+			}
 			if (state == NULL)
 				break;
-			nfs_remove_bad_delegation(state->inode);
-			nfs4_schedule_stateid_recovery(server, state);
+			ret = nfs4_schedule_stateid_recovery(server, state);
+			if (ret < 0)
+				break;
 			goto wait_on_recovery;
 		case -NFS4ERR_EXPIRED:
-			if (state != NULL)
-				nfs4_schedule_stateid_recovery(server, state);
+			if (state != NULL) {
+				ret = nfs4_schedule_stateid_recovery(server, state);
+				if (ret < 0)
+					break;
+			}
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_STALE_CLIENTID:
 			nfs4_schedule_lease_recovery(clp);
@@ -756,10 +769,40 @@
 	struct iattr attrs;
 	unsigned long timestamp;
 	unsigned int rpc_done : 1;
+	unsigned int is_recover : 1;
 	int rpc_status;
 	int cancelled;
 };
 
+static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
+		int err, struct nfs4_exception *exception)
+{
+	if (err != -EINVAL)
+		return false;
+	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
+		return false;
+	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
+	exception->retry = 1;
+	return true;
+}
+
+static enum open_claim_type4
+nfs4_map_atomic_open_claim(struct nfs_server *server,
+		enum open_claim_type4 claim)
+{
+	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
+		return claim;
+	switch (claim) {
+	default:
+		return claim;
+	case NFS4_OPEN_CLAIM_FH:
+		return NFS4_OPEN_CLAIM_NULL;
+	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
+		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
+	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
+		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
+	}
+}
 
 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
 {
@@ -775,6 +818,7 @@
 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
 		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
 		const struct iattr *attrs,
+		enum open_claim_type4 claim,
 		gfp_t gfp_mask)
 {
 	struct dentry *parent = dget_parent(dentry);
@@ -793,7 +837,6 @@
 	p->dir = parent;
 	p->owner = sp;
 	atomic_inc(&sp->so_count);
-	p->o_arg.fh = NFS_FH(dir);
 	p->o_arg.open_flags = flags;
 	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
 	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
@@ -811,7 +854,19 @@
 	p->o_arg.server = server;
 	p->o_arg.bitmask = server->attr_bitmask;
 	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
-	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
+	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
+	switch (p->o_arg.claim) {
+	case NFS4_OPEN_CLAIM_NULL:
+	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+		p->o_arg.fh = NFS_FH(dir);
+		break;
+	case NFS4_OPEN_CLAIM_PREVIOUS:
+	case NFS4_OPEN_CLAIM_FH:
+	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
+	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
+		p->o_arg.fh = NFS_FH(dentry->d_inode);
+	}
 	if (attrs != NULL && attrs->ia_valid != 0) {
 		__be32 verf[2];
 
@@ -924,6 +979,7 @@
 	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
 		nfs4_stateid_copy(&state->stateid, stateid);
 	nfs4_stateid_copy(&state->open_stateid, stateid);
+	set_bit(NFS_OPEN_STATE, &state->flags);
 	switch (fmode) {
 		case FMODE_READ:
 			set_bit(NFS_O_RDONLY_STATE, &state->flags);
@@ -1047,9 +1103,11 @@
 		nfs4_stateid_copy(&stateid, &delegation->stateid);
 		rcu_read_unlock();
 		nfs_release_seqid(opendata->o_arg.seqid);
-		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
-		if (ret != 0)
-			goto out;
+		if (!opendata->is_recover) {
+			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
+			if (ret != 0)
+				goto out;
+		}
 		ret = -EAGAIN;
 
 		/* Try to update the stateid using the delegation */
@@ -1194,11 +1252,13 @@
 	return ERR_PTR(-ENOENT);
 }
 
-static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
+static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
+		struct nfs4_state *state, enum open_claim_type4 claim)
 {
 	struct nfs4_opendata *opendata;
 
-	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
+	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
+			NULL, claim, GFP_NOFS);
 	if (opendata == NULL)
 		return ERR_PTR(-ENOMEM);
 	opendata->state = state;
@@ -1234,6 +1294,7 @@
 
 	/* memory barrier prior to reading state->n_* */
 	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	clear_bit(NFS_OPEN_STATE, &state->flags);
 	smp_rmb();
 	if (state->n_rdwr != 0) {
 		clear_bit(NFS_O_RDWR_STATE, &state->flags);
@@ -1284,11 +1345,10 @@
 	fmode_t delegation_type = 0;
 	int status;
 
-	opendata = nfs4_open_recoverdata_alloc(ctx, state);
+	opendata = nfs4_open_recoverdata_alloc(ctx, state,
+			NFS4_OPEN_CLAIM_PREVIOUS);
 	if (IS_ERR(opendata))
 		return PTR_ERR(opendata);
-	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
-	opendata->o_arg.fh = NFS_FH(state->inode);
 	rcu_read_lock();
 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
 	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
@@ -1307,6 +1367,8 @@
 	int err;
 	do {
 		err = _nfs4_do_open_reclaim(ctx, state);
+		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
+			continue;
 		if (err != -NFS4ERR_DELAY)
 			break;
 		nfs4_handle_exception(server, err, &exception);
@@ -1321,71 +1383,72 @@
 
 	ctx = nfs4_state_find_open_context(state);
 	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+		return -EAGAIN;
 	ret = nfs4_do_open_reclaim(ctx, state);
 	put_nfs_open_context(ctx);
 	return ret;
 }
 
-static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
 {
-	struct nfs4_opendata *opendata;
-	int ret;
-
-	opendata = nfs4_open_recoverdata_alloc(ctx, state);
-	if (IS_ERR(opendata))
-		return PTR_ERR(opendata);
-	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
-	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-	ret = nfs4_open_recover(opendata, state);
-	nfs4_opendata_put(opendata);
-	return ret;
+	switch (err) {
+		default:
+			printk(KERN_ERR "NFS: %s: unhandled error "
+					"%d.\n", __func__, err);
+		case 0:
+		case -ENOENT:
+		case -ESTALE:
+			break;
+		case -NFS4ERR_BADSESSION:
+		case -NFS4ERR_BADSLOT:
+		case -NFS4ERR_BAD_HIGH_SLOT:
+		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+		case -NFS4ERR_DEADSESSION:
+			set_bit(NFS_DELEGATED_STATE, &state->flags);
+			nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
+			return -EAGAIN;
+		case -NFS4ERR_STALE_CLIENTID:
+		case -NFS4ERR_STALE_STATEID:
+			set_bit(NFS_DELEGATED_STATE, &state->flags);
+		case -NFS4ERR_EXPIRED:
+			/* Don't recall a delegation if it was lost */
+			nfs4_schedule_lease_recovery(server->nfs_client);
+			return -EAGAIN;
+		case -NFS4ERR_DELEG_REVOKED:
+		case -NFS4ERR_ADMIN_REVOKED:
+		case -NFS4ERR_BAD_STATEID:
+		case -NFS4ERR_OPENMODE:
+			nfs_inode_find_state_and_recover(state->inode,
+					stateid);
+			nfs4_schedule_stateid_recovery(server, state);
+			return 0;
+		case -NFS4ERR_DELAY:
+		case -NFS4ERR_GRACE:
+			set_bit(NFS_DELEGATED_STATE, &state->flags);
+			ssleep(1);
+			return -EAGAIN;
+		case -ENOMEM:
+		case -NFS4ERR_DENIED:
+			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
+			return 0;
+	}
+	return err;
 }
 
 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
 {
-	struct nfs4_exception exception = { };
 	struct nfs_server *server = NFS_SERVER(state->inode);
+	struct nfs4_opendata *opendata;
 	int err;
-	do {
-		err = _nfs4_open_delegation_recall(ctx, state, stateid);
-		switch (err) {
-			case 0:
-			case -ENOENT:
-			case -ESTALE:
-				goto out;
-			case -NFS4ERR_BADSESSION:
-			case -NFS4ERR_BADSLOT:
-			case -NFS4ERR_BAD_HIGH_SLOT:
-			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
-			case -NFS4ERR_DEADSESSION:
-				set_bit(NFS_DELEGATED_STATE, &state->flags);
-				nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
-				err = -EAGAIN;
-				goto out;
-			case -NFS4ERR_STALE_CLIENTID:
-			case -NFS4ERR_STALE_STATEID:
-				set_bit(NFS_DELEGATED_STATE, &state->flags);
-			case -NFS4ERR_EXPIRED:
-				/* Don't recall a delegation if it was lost */
-				nfs4_schedule_lease_recovery(server->nfs_client);
-				err = -EAGAIN;
-				goto out;
-			case -NFS4ERR_DELEG_REVOKED:
-			case -NFS4ERR_ADMIN_REVOKED:
-			case -NFS4ERR_BAD_STATEID:
-				nfs_inode_find_state_and_recover(state->inode,
-						stateid);
-				nfs4_schedule_stateid_recovery(server, state);
-			case -ENOMEM:
-				err = 0;
-				goto out;
-		}
-		set_bit(NFS_DELEGATED_STATE, &state->flags);
-		err = nfs4_handle_exception(server, err, &exception);
-	} while (exception.retry);
-out:
-	return err;
+
+	opendata = nfs4_open_recoverdata_alloc(ctx, state,
+			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
+	if (IS_ERR(opendata))
+		return PTR_ERR(opendata);
+	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+	err = nfs4_open_recover(opendata, state);
+	nfs4_opendata_put(opendata);
+	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
 }
 
 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
@@ -1468,6 +1531,7 @@
 {
 	struct nfs4_opendata *data = calldata;
 	struct nfs4_state_owner *sp = data->owner;
+	struct nfs_client *clp = sp->so_server->nfs_client;
 
 	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
 		goto out_wait;
@@ -1483,15 +1547,20 @@
 		rcu_read_lock();
 		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
 		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+		    data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
 		    can_open_delegated(delegation, data->o_arg.fmode))
 			goto unlock_no_action;
 		rcu_read_unlock();
 	}
 	/* Update client id. */
-	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
-	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
-		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
+	data->o_arg.clientid = clp->cl_clientid;
+	switch (data->o_arg.claim) {
+	case NFS4_OPEN_CLAIM_PREVIOUS:
+	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
+	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
 		data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
+	case NFS4_OPEN_CLAIM_FH:
+		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
 		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
 	}
 	data->timestamp = jiffies;
@@ -1500,6 +1569,16 @@
 				&data->o_res.seq_res,
 				task) != 0)
 		nfs_release_seqid(data->o_arg.seqid);
+
+	/* Set the create mode (note dependency on the session type) */
+	data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
+	if (data->o_arg.open_flags & O_EXCL) {
+		data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
+		if (nfs4_has_persistent_session(clp))
+			data->o_arg.createmode = NFS4_CREATE_GUARDED;
+		else if (clp->cl_mvops->minor_version > 0)
+			data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
+	}
 	return;
 unlock_no_action:
 	rcu_read_unlock();
@@ -1595,8 +1674,11 @@
 	data->rpc_done = 0;
 	data->rpc_status = 0;
 	data->cancelled = 0;
-	if (isrecover)
+	data->is_recover = 0;
+	if (isrecover) {
 		nfs4_set_sequence_privileged(&o_arg->seq_args);
+		data->is_recover = 1;
+	}
 	task = rpc_run_task(&task_setup_data);
         if (IS_ERR(task))
                 return PTR_ERR(task);
@@ -1721,7 +1803,8 @@
 	struct nfs4_opendata *opendata;
 	int ret;
 
-	opendata = nfs4_open_recoverdata_alloc(ctx, state);
+	opendata = nfs4_open_recoverdata_alloc(ctx, state,
+			NFS4_OPEN_CLAIM_FH);
 	if (IS_ERR(opendata))
 		return PTR_ERR(opendata);
 	ret = nfs4_open_recover(opendata, state);
@@ -1739,6 +1822,8 @@
 
 	do {
 		err = _nfs4_open_expired(ctx, state);
+		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
+			continue;
 		switch (err) {
 		default:
 			goto out;
@@ -1759,7 +1844,7 @@
 
 	ctx = nfs4_state_find_open_context(state);
 	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+		return -EAGAIN;
 	ret = nfs4_do_open_expired(ctx, state);
 	put_nfs_open_context(ctx);
 	return ret;
@@ -1821,6 +1906,7 @@
 		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
 		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
 		clear_bit(NFS_O_RDWR_STATE, &state->flags);
+		clear_bit(NFS_OPEN_STATE, &state->flags);
 	}
 	return status;
 }
@@ -1881,10 +1967,8 @@
 	if (ret != 0)
 		goto out;
 
-	if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) {
+	if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
 		nfs4_schedule_stateid_recovery(server, state);
-		nfs4_wait_clnt_recover(server->nfs_client);
-	}
 	*res = state;
 out:
 	return ret;
@@ -1906,6 +1990,7 @@
 	struct nfs4_state     *state = NULL;
 	struct nfs_server       *server = NFS_SERVER(dir);
 	struct nfs4_opendata *opendata;
+	enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
 	int status;
 
 	/* Protect against reboot recovery conflicts */
@@ -1921,7 +2006,10 @@
 	if (dentry->d_inode != NULL)
 		nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
 	status = -ENOMEM;
-	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
+	if (dentry->d_inode)
+		claim = NFS4_OPEN_CLAIM_FH;
+	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
+			claim, GFP_KERNEL);
 	if (opendata == NULL)
 		goto err_put_state_owner;
 
@@ -1938,7 +2026,8 @@
 	if (status != 0)
 		goto err_opendata_put;
 
-	if (opendata->o_arg.open_flags & O_EXCL) {
+	if ((opendata->o_arg.open_flags & O_EXCL) &&
+	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
 		nfs4_exclusive_attrset(opendata, sattr);
 
 		nfs_fattr_init(opendata->o_res.f_attr);
@@ -1979,6 +2068,7 @@
 					struct rpc_cred *cred,
 					struct nfs4_threshold **ctx_th)
 {
+	struct nfs_server *server = NFS_SERVER(dir);
 	struct nfs4_exception exception = { };
 	struct nfs4_state *res;
 	int status;
@@ -2022,7 +2112,9 @@
 			exception.retry = 1;
 			continue;
 		}
-		res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
+		if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
+			continue;
+		res = ERR_PTR(nfs4_handle_exception(server,
 					status, &exception));
 	} while (exception.retry);
 	return res;
@@ -2050,20 +2142,25 @@
 		.rpc_cred	= cred,
         };
 	unsigned long timestamp = jiffies;
+	fmode_t fmode;
+	bool truncate;
 	int status;
 
 	nfs_fattr_init(fattr);
 
-	if (state != NULL) {
+	/* Servers should only apply open mode checks for file size changes */
+	truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
+	fmode = truncate ? FMODE_WRITE : FMODE_READ;
+
+	if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
+		/* Use that stateid */
+	} else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) {
 		struct nfs_lockowner lockowner = {
 			.l_owner = current->files,
 			.l_pid = current->tgid,
 		};
 		nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
 				&lockowner);
-	} else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
-				FMODE_WRITE)) {
-		/* Use that stateid */
 	} else
 		nfs4_stateid_copy(&arg.stateid, &zero_stateid);
 
@@ -2087,6 +2184,13 @@
 		err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
 		switch (err) {
 		case -NFS4ERR_OPENMODE:
+			if (!(sattr->ia_valid & ATTR_SIZE)) {
+				pr_warn_once("NFSv4: server %s is incorrectly "
+						"applying open mode checks to "
+						"a SETATTR that is not "
+						"changing file size.\n",
+						server->nfs_client->cl_hostname);
+			}
 			if (state && !(state->state & FMODE_WRITE)) {
 				err = -EBADF;
 				if (sattr->ia_valid & ATTR_OPEN)
@@ -2130,11 +2234,19 @@
 		fmode_t fmode)
 {
 	spin_lock(&state->owner->so_lock);
-	if (!(fmode & FMODE_READ))
-		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
-	if (!(fmode & FMODE_WRITE))
-		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
+	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
+	case FMODE_WRITE:
+		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+		break;
+	case FMODE_READ:
+		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+		break;
+	case 0:
+		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+		clear_bit(NFS_OPEN_STATE, &state->flags);
+	}
 	spin_unlock(&state->owner->so_lock);
 }
 
@@ -2202,6 +2314,8 @@
 			calldata->arg.fmode &= ~FMODE_WRITE;
 		}
 	}
+	if (!nfs4_valid_open_stateid(state))
+		call_close = 0;
 	spin_unlock(&state->owner->so_lock);
 
 	if (!call_close) {
@@ -2212,8 +2326,10 @@
 	if (calldata->arg.fmode == 0) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 		if (calldata->roc &&
-		    pnfs_roc_drain(inode, &calldata->roc_barrier, task))
+		    pnfs_roc_drain(inode, &calldata->roc_barrier, task)) {
+			nfs_release_seqid(calldata->arg.seqid);
 			goto out_wait;
+		    }
 	}
 
 	nfs_fattr_init(calldata->res.fattr);
@@ -2444,7 +2560,7 @@
 
 	auth = rpcauth_create(flavor, server->client);
 	if (IS_ERR(auth)) {
-		ret = -EIO;
+		ret = -EACCES;
 		goto out;
 	}
 	ret = nfs4_lookup_root(server, fhandle, info);
@@ -2452,27 +2568,36 @@
 	return ret;
 }
 
+/*
+ * Retry pseudoroot lookup with various security flavors.  We do this when:
+ *
+ *   NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
+ *   NFSv4.1: the server does not support the SECINFO_NO_NAME operation
+ *
+ * Returns zero on success, or a negative NFS4ERR value, or a
+ * negative errno value.
+ */
 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
 			      struct nfs_fsinfo *info)
 {
-	int i, len, status = 0;
-	rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
+	/* Per 3530bis 15.33.5 */
+	static const rpc_authflavor_t flav_array[] = {
+		RPC_AUTH_GSS_KRB5P,
+		RPC_AUTH_GSS_KRB5I,
+		RPC_AUTH_GSS_KRB5,
+		RPC_AUTH_UNIX,			/* courtesy */
+		RPC_AUTH_NULL,
+	};
+	int status = -EPERM;
+	size_t i;
 
-	len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array));
-	if (len < 0)
-		return len;
-
-	for (i = 0; i < len; i++) {
-		/* AUTH_UNIX is the default flavor if none was specified,
-		 * thus has already been tried. */
-		if (flav_array[i] == RPC_AUTH_UNIX)
-			continue;
-
+	for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
 		status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
 		if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
 			continue;
 		break;
 	}
+
 	/*
 	 * -EACCESS could mean that the user doesn't have correct permissions
 	 * to access the mount.  It could also mean that we tried to mount
@@ -2485,24 +2610,36 @@
 	return status;
 }
 
-/*
- * get the file handle for the "/" directory on the server
+static int nfs4_do_find_root_sec(struct nfs_server *server,
+		struct nfs_fh *fhandle, struct nfs_fsinfo *info)
+{
+	int mv = server->nfs_client->cl_minorversion;
+	return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
+}
+
+/**
+ * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
+ * @server: initialized nfs_server handle
+ * @fhandle: we fill in the pseudo-fs root file handle
+ * @info: we fill in an FSINFO struct
+ *
+ * Returns zero on success, or a negative errno.
  */
 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
 			 struct nfs_fsinfo *info)
 {
-	int minor_version = server->nfs_client->cl_minorversion;
-	int status = nfs4_lookup_root(server, fhandle, info);
-	if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
-		/*
-		 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
-		 * by nfs4_map_errors() as this function exits.
-		 */
-		status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
+	int status;
+
+	status = nfs4_lookup_root(server, fhandle, info);
+	if ((status == -NFS4ERR_WRONGSEC) &&
+	    !(server->flags & NFS_MOUNT_SECFLAVOUR))
+		status = nfs4_do_find_root_sec(server, fhandle, info);
+
 	if (status == 0)
 		status = nfs4_server_capabilities(server, fhandle);
 	if (status == 0)
 		status = nfs4_do_fsinfo(server, fhandle, info);
+
 	return nfs4_map_errors(status);
 }
 
@@ -3381,12 +3518,21 @@
 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
 {
 	struct nfs4_exception exception = { };
+	unsigned long now = jiffies;
 	int err;
 
 	do {
-		err = nfs4_handle_exception(server,
-				_nfs4_do_fsinfo(server, fhandle, fsinfo),
-				&exception);
+		err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
+		if (err == 0) {
+			struct nfs_client *clp = server->nfs_client;
+
+			spin_lock(&clp->cl_lock);
+			clp->cl_lease_time = fsinfo->lease_time * HZ;
+			clp->cl_last_renewal = now;
+			spin_unlock(&clp->cl_lock);
+			break;
+		}
+		err = nfs4_handle_exception(server, err, &exception);
 	} while (exception.retry);
 	return err;
 }
@@ -3446,6 +3592,46 @@
 	return err;
 }
 
+int nfs4_set_rw_stateid(nfs4_stateid *stateid,
+		const struct nfs_open_context *ctx,
+		const struct nfs_lock_context *l_ctx,
+		fmode_t fmode)
+{
+	const struct nfs_lockowner *lockowner = NULL;
+
+	if (l_ctx != NULL)
+		lockowner = &l_ctx->lockowner;
+	return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
+}
+EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
+
+static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
+		const struct nfs_open_context *ctx,
+		const struct nfs_lock_context *l_ctx,
+		fmode_t fmode)
+{
+	nfs4_stateid current_stateid;
+
+	if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
+		return false;
+	return nfs4_stateid_match(stateid, &current_stateid);
+}
+
+static bool nfs4_error_stateid_expired(int err)
+{
+	switch (err) {
+	case -NFS4ERR_DELEG_REVOKED:
+	case -NFS4ERR_ADMIN_REVOKED:
+	case -NFS4ERR_BAD_STATEID:
+	case -NFS4ERR_STALE_STATEID:
+	case -NFS4ERR_OLD_STATEID:
+	case -NFS4ERR_OPENMODE:
+	case -NFS4ERR_EXPIRED:
+		return true;
+	}
+	return false;
+}
+
 void __nfs4_read_done_cb(struct nfs_read_data *data)
 {
 	nfs_invalidate_atime(data->header->inode);
@@ -3466,6 +3652,20 @@
 	return 0;
 }
 
+static bool nfs4_read_stateid_changed(struct rpc_task *task,
+		struct nfs_readargs *args)
+{
+
+	if (!nfs4_error_stateid_expired(task->tk_status) ||
+		nfs4_stateid_is_current(&args->stateid,
+				args->context,
+				args->lock_context,
+				FMODE_READ))
+		return false;
+	rpc_restart_call_prepare(task);
+	return true;
+}
+
 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
 {
 
@@ -3473,7 +3673,8 @@
 
 	if (!nfs4_sequence_done(task, &data->res.seq_res))
 		return -EAGAIN;
-
+	if (nfs4_read_stateid_changed(task, &data->args))
+		return -EAGAIN;
 	return data->read_done_cb ? data->read_done_cb(task, data) :
 				    nfs4_read_done_cb(task, data);
 }
@@ -3488,10 +3689,13 @@
 
 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
 {
-	nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+	if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
 			&data->args.seq_args,
 			&data->res.seq_res,
-			task);
+			task))
+		return;
+	nfs4_set_rw_stateid(&data->args.stateid, data->args.context,
+			data->args.lock_context, FMODE_READ);
 }
 
 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
@@ -3509,10 +3713,26 @@
 	return 0;
 }
 
+static bool nfs4_write_stateid_changed(struct rpc_task *task,
+		struct nfs_writeargs *args)
+{
+
+	if (!nfs4_error_stateid_expired(task->tk_status) ||
+		nfs4_stateid_is_current(&args->stateid,
+				args->context,
+				args->lock_context,
+				FMODE_WRITE))
+		return false;
+	rpc_restart_call_prepare(task);
+	return true;
+}
+
 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
 {
 	if (!nfs4_sequence_done(task, &data->res.seq_res))
 		return -EAGAIN;
+	if (nfs4_write_stateid_changed(task, &data->args))
+		return -EAGAIN;
 	return data->write_done_cb ? data->write_done_cb(task, data) :
 		nfs4_write_done_cb(task, data);
 }
@@ -3552,10 +3772,13 @@
 
 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
 {
-	nfs4_setup_sequence(NFS_SERVER(data->header->inode),
+	if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
 			&data->args.seq_args,
 			&data->res.seq_res,
-			task);
+			task))
+		return;
+	nfs4_set_rw_stateid(&data->args.stateid, data->args.context,
+			data->args.lock_context, FMODE_WRITE);
 }
 
 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
@@ -3657,7 +3880,7 @@
 		return -ENOMEM;
 	data->client = clp;
 	data->timestamp = jiffies;
-	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
+	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
 			&nfs4_renew_ops, data);
 }
 
@@ -3671,7 +3894,7 @@
 	unsigned long now = jiffies;
 	int status;
 
-	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
 	if (status < 0)
 		return status;
 	do_renew_lease(clp, now);
@@ -3981,11 +4204,14 @@
 		case -NFS4ERR_OPENMODE:
 			if (state == NULL)
 				break;
-			nfs4_schedule_stateid_recovery(server, state);
+			if (nfs4_schedule_stateid_recovery(server, state) < 0)
+				goto stateid_invalid;
 			goto wait_on_recovery;
 		case -NFS4ERR_EXPIRED:
-			if (state != NULL)
-				nfs4_schedule_stateid_recovery(server, state);
+			if (state != NULL) {
+				if (nfs4_schedule_stateid_recovery(server, state) < 0)
+					goto stateid_invalid;
+			}
 		case -NFS4ERR_STALE_STATEID:
 		case -NFS4ERR_STALE_CLIENTID:
 			nfs4_schedule_lease_recovery(clp);
@@ -4017,6 +4243,9 @@
 	}
 	task->tk_status = nfs4_map_errors(task->tk_status);
 	return 0;
+stateid_invalid:
+	task->tk_status = -EIO;
+	return 0;
 wait_on_recovery:
 	rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
 	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
@@ -4144,27 +4373,17 @@
 		struct nfs4_setclientid_res *arg,
 		struct rpc_cred *cred)
 {
-	struct nfs_fsinfo fsinfo;
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
 		.rpc_argp = arg,
-		.rpc_resp = &fsinfo,
 		.rpc_cred = cred,
 	};
-	unsigned long now;
 	int status;
 
 	dprintk("NFS call  setclientid_confirm auth=%s, (client ID %llx)\n",
 		clp->cl_rpcclient->cl_auth->au_ops->au_name,
 		clp->cl_clientid);
-	now = jiffies;
 	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
-	if (status == 0) {
-		spin_lock(&clp->cl_lock);
-		clp->cl_lease_time = fsinfo.lease_time * HZ;
-		clp->cl_last_renewal = now;
-		spin_unlock(&clp->cl_lock);
-	}
 	dprintk("NFS reply setclientid_confirm: %d\n", status);
 	return status;
 }
@@ -4628,17 +4847,23 @@
 		if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
 			goto out_release_lock_seqid;
 		}
-		data->arg.open_stateid = &state->stateid;
+		data->arg.open_stateid = &state->open_stateid;
 		data->arg.new_lock_owner = 1;
 		data->res.open_seqid = data->arg.open_seqid;
 	} else
 		data->arg.new_lock_owner = 0;
+	if (!nfs4_valid_open_stateid(state)) {
+		data->rpc_status = -EBADF;
+		task->tk_action = NULL;
+		goto out_release_open_seqid;
+	}
 	data->timestamp = jiffies;
 	if (nfs4_setup_sequence(data->server,
 				&data->arg.seq_args,
 				&data->res.seq_res,
 				task) == 0)
 		return;
+out_release_open_seqid:
 	nfs_release_seqid(data->arg.open_seqid);
 out_release_lock_seqid:
 	nfs_release_seqid(data->arg.lock_seqid);
@@ -4984,58 +5209,16 @@
 	return status;
 }
 
-int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
+int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
-	struct nfs4_exception exception = { };
 	int err;
 
 	err = nfs4_set_lock_state(state, fl);
 	if (err != 0)
-		goto out;
-	do {
-		err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
-		switch (err) {
-			default:
-				printk(KERN_ERR "NFS: %s: unhandled error "
-					"%d.\n", __func__, err);
-			case 0:
-			case -ESTALE:
-				goto out;
-			case -NFS4ERR_STALE_CLIENTID:
-			case -NFS4ERR_STALE_STATEID:
-				set_bit(NFS_DELEGATED_STATE, &state->flags);
-			case -NFS4ERR_EXPIRED:
-				nfs4_schedule_lease_recovery(server->nfs_client);
-				err = -EAGAIN;
-				goto out;
-			case -NFS4ERR_BADSESSION:
-			case -NFS4ERR_BADSLOT:
-			case -NFS4ERR_BAD_HIGH_SLOT:
-			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
-			case -NFS4ERR_DEADSESSION:
-				set_bit(NFS_DELEGATED_STATE, &state->flags);
-				nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
-				err = -EAGAIN;
-				goto out;
-			case -NFS4ERR_DELEG_REVOKED:
-			case -NFS4ERR_ADMIN_REVOKED:
-			case -NFS4ERR_BAD_STATEID:
-			case -NFS4ERR_OPENMODE:
-				nfs4_schedule_stateid_recovery(server, state);
-				err = 0;
-				goto out;
-			case -ENOMEM:
-			case -NFS4ERR_DENIED:
-				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
-				err = 0;
-				goto out;
-		}
-		set_bit(NFS_DELEGATED_STATE, &state->flags);
-		err = nfs4_handle_exception(server, err, &exception);
-	} while (exception.retry);
-out:
-	return err;
+		return err;
+	err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
 }
 
 struct nfs_release_lockowner_data {
@@ -5849,7 +6032,7 @@
 		.rpc_client = clp->cl_rpcclient,
 		.rpc_message = &msg,
 		.callback_ops = &nfs41_sequence_ops,
-		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
+		.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
 	};
 
 	if (!atomic_inc_not_zero(&clp->cl_count))
@@ -6726,6 +6909,10 @@
 
 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
 	.minor_version = 0,
+	.init_caps = NFS_CAP_READDIRPLUS
+		| NFS_CAP_ATOMIC_OPEN
+		| NFS_CAP_CHANGE_ATTR
+		| NFS_CAP_POSIX_LOCK,
 	.call_sync = _nfs4_call_sync,
 	.match_stateid = nfs4_match_stateid,
 	.find_root_sec = nfs4_find_root_sec,
@@ -6737,6 +6924,12 @@
 #if defined(CONFIG_NFS_V4_1)
 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
 	.minor_version = 1,
+	.init_caps = NFS_CAP_READDIRPLUS
+		| NFS_CAP_ATOMIC_OPEN
+		| NFS_CAP_CHANGE_ATTR
+		| NFS_CAP_POSIX_LOCK
+		| NFS_CAP_STATEID_NFSV41
+		| NFS_CAP_ATOMIC_OPEN_V1,
 	.call_sync = nfs4_call_sync_sequence,
 	.match_stateid = nfs41_match_stateid,
 	.find_root_sec = nfs41_find_root_sec,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index d41a351..0b32f94 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -154,18 +154,6 @@
 	return cred;
 }
 
-static void nfs4_clear_machine_cred(struct nfs_client *clp)
-{
-	struct rpc_cred *cred;
-
-	spin_lock(&clp->cl_lock);
-	cred = clp->cl_machine_cred;
-	clp->cl_machine_cred = NULL;
-	spin_unlock(&clp->cl_lock);
-	if (cred != NULL)
-		put_rpccred(cred);
-}
-
 static struct rpc_cred *
 nfs4_get_renew_cred_server_locked(struct nfs_server *server)
 {
@@ -699,6 +687,8 @@
 	list_for_each_entry(state, &nfsi->open_states, inode_states) {
 		if (state->owner != owner)
 			continue;
+		if (!nfs4_valid_open_stateid(state))
+			continue;
 		if (atomic_inc_not_zero(&state->count))
 			return state;
 	}
@@ -987,13 +977,14 @@
 	return 0;
 }
 
-static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state,
+static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
+		struct nfs4_state *state,
 		const struct nfs_lockowner *lockowner)
 {
 	struct nfs4_lock_state *lsp;
 	fl_owner_t fl_owner;
 	pid_t fl_pid;
-	bool ret = false;
+	int ret = -ENOENT;
 
 
 	if (lockowner == NULL)
@@ -1008,7 +999,10 @@
 	lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
 	if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
 		nfs4_stateid_copy(dst, &lsp->ls_stateid);
-		ret = true;
+		ret = 0;
+		smp_rmb();
+		if (!list_empty(&lsp->ls_seqid.list))
+			ret = -EWOULDBLOCK;
 	}
 	spin_unlock(&state->state_lock);
 	nfs4_put_lock_state(lsp);
@@ -1016,28 +1010,44 @@
 	return ret;
 }
 
-static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
+static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
 {
+	const nfs4_stateid *src;
+	int ret;
 	int seq;
 
 	do {
+		src = &zero_stateid;
 		seq = read_seqbegin(&state->seqlock);
-		nfs4_stateid_copy(dst, &state->stateid);
+		if (test_bit(NFS_OPEN_STATE, &state->flags))
+			src = &state->open_stateid;
+		nfs4_stateid_copy(dst, src);
+		ret = 0;
+		smp_rmb();
+		if (!list_empty(&state->owner->so_seqid.list))
+			ret = -EWOULDBLOCK;
 	} while (read_seqretry(&state->seqlock, seq));
+	return ret;
 }
 
 /*
  * Byte-range lock aware utility to initialize the stateid of read/write
  * requests.
  */
-void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
+int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
 		fmode_t fmode, const struct nfs_lockowner *lockowner)
 {
+	int ret = 0;
 	if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
-		return;
-	if (nfs4_copy_lock_stateid(dst, state, lockowner))
-		return;
-	nfs4_copy_open_stateid(dst, state);
+		goto out;
+	ret = nfs4_copy_lock_stateid(dst, state, lockowner);
+	if (ret != -ENOENT)
+		goto out;
+	ret = nfs4_copy_open_stateid(dst, state);
+out:
+	if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
+		dst->seqid = 0;
+	return ret;
 }
 
 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
@@ -1286,14 +1296,17 @@
 	return 1;
 }
 
-void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
+int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
 {
 	struct nfs_client *clp = server->nfs_client;
 
+	if (!nfs4_valid_open_stateid(state))
+		return -EBADF;
 	nfs4_state_mark_reclaim_nograce(clp, state);
 	dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
 			clp->cl_hostname);
 	nfs4_schedule_state_manager(clp);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
 
@@ -1323,6 +1336,27 @@
 		nfs4_schedule_state_manager(clp);
 }
 
+static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
+{
+	struct inode *inode = state->inode;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	struct nfs_open_context *ctx;
+
+	spin_lock(&inode->i_lock);
+	list_for_each_entry(ctx, &nfsi->open_files, list) {
+		if (ctx->state != state)
+			continue;
+		set_bit(NFS_CONTEXT_BAD, &ctx->flags);
+	}
+	spin_unlock(&inode->i_lock);
+}
+
+static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
+{
+	set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
+	nfs4_state_mark_open_context_bad(state);
+}
+
 
 static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
 {
@@ -1398,6 +1432,8 @@
 	list_for_each_entry(state, &sp->so_states, open_states) {
 		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
 			continue;
+		if (!nfs4_valid_open_stateid(state))
+			continue;
 		if (state->state == 0)
 			continue;
 		atomic_inc(&state->count);
@@ -1430,11 +1466,10 @@
 				 * Open state on this file cannot be recovered
 				 * All we can do is revert to using the zero stateid.
 				 */
-				memset(&state->stateid, 0,
-					sizeof(state->stateid));
-				/* Mark the file as being 'closed' */
-				state->state = 0;
+				nfs4_state_mark_recovery_failed(state, status);
 				break;
+			case -EAGAIN:
+				ssleep(1);
 			case -NFS4ERR_ADMIN_REVOKED:
 			case -NFS4ERR_STALE_STATEID:
 			case -NFS4ERR_BAD_STATEID:
@@ -1696,6 +1731,10 @@
 	}
 	status = ops->renew_lease(clp, cred);
 	put_rpccred(cred);
+	if (status == -ETIMEDOUT) {
+		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+		return 0;
+	}
 out:
 	return nfs4_recovery_handle_error(clp, status);
 }
@@ -1725,10 +1764,6 @@
 		clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
 		return -EPERM;
 	case -EACCES:
-		if (clp->cl_machine_cred == NULL)
-			return -EACCES;
-		/* Handle case where the user hasn't set up machine creds */
-		nfs4_clear_machine_cred(clp);
 	case -NFS4ERR_DELAY:
 	case -ETIMEDOUT:
 	case -EAGAIN:
@@ -1823,31 +1858,18 @@
 {
 	const struct nfs4_state_recovery_ops *ops =
 				clp->cl_mvops->reboot_recovery_ops;
-	rpc_authflavor_t *flavors, flav, save;
 	struct rpc_clnt *clnt;
 	struct rpc_cred *cred;
-	int i, len, status;
+	int i, status;
 
 	dprintk("NFS: %s: testing '%s'\n", __func__, clp->cl_hostname);
 
-	len = NFS_MAX_SECFLAVORS;
-	flavors = kcalloc(len, sizeof(*flavors), GFP_KERNEL);
-	if (flavors == NULL) {
-		status = -ENOMEM;
-		goto out;
-	}
-	len = rpcauth_list_flavors(flavors, len);
-	if (len < 0) {
-		status = len;
-		goto out_free;
-	}
 	clnt = clp->cl_rpcclient;
-	save = clnt->cl_auth->au_flavor;
 	i = 0;
 
 	mutex_lock(&nfs_clid_init_mutex);
-	status  = -ENOENT;
 again:
+	status  = -ENOENT;
 	cred = ops->get_clid_cred(clp);
 	if (cred == NULL)
 		goto out_unlock;
@@ -1857,12 +1879,6 @@
 	switch (status) {
 	case 0:
 		break;
-
-	case -EACCES:
-		if (clp->cl_machine_cred == NULL)
-			break;
-		/* Handle case where the user hasn't set up machine creds */
-		nfs4_clear_machine_cred(clp);
 	case -NFS4ERR_DELAY:
 	case -ETIMEDOUT:
 	case -EAGAIN:
@@ -1871,17 +1887,12 @@
 		dprintk("NFS: %s after status %d, retrying\n",
 			__func__, status);
 		goto again;
-
+	case -EACCES:
+		if (i++)
+			break;
 	case -NFS4ERR_CLID_INUSE:
 	case -NFS4ERR_WRONGSEC:
-		status = -EPERM;
-		if (i >= len)
-			break;
-
-		flav = flavors[i++];
-		if (flav == save)
-			flav = flavors[i++];
-		clnt = rpc_clone_client_set_auth(clnt, flav);
+		clnt = rpc_clone_client_set_auth(clnt, RPC_AUTH_UNIX);
 		if (IS_ERR(clnt)) {
 			status = PTR_ERR(clnt);
 			break;
@@ -1903,13 +1914,15 @@
 	case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
 				 * in nfs4_exchange_id */
 		status = -EKEYEXPIRED;
+		break;
+	default:
+		pr_warn("NFS: %s unhandled error %d. Exiting with error EIO\n",
+				__func__, status);
+		status = -EIO;
 	}
 
 out_unlock:
 	mutex_unlock(&nfs_clid_init_mutex);
-out_free:
-	kfree(flavors);
-out:
 	dprintk("NFS: %s: status = %d\n", __func__, status);
 	return status;
 }
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 569b166..a5e1a30 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -252,6 +252,8 @@
 
 	dfprintk(MOUNT, "--> nfs4_try_mount()\n");
 
+	if (data->auth_flavors[0] == RPC_AUTH_MAXFLAVOR)
+		data->auth_flavors[0] = RPC_AUTH_UNIX;
 	export_path = data->nfs_server.export_path;
 	data->nfs_server.export_path = "/";
 	root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e3edda5..3c79c58 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -530,14 +530,10 @@
 				decode_setclientid_maxsz)
 #define NFS4_enc_setclientid_confirm_sz \
 				(compound_encode_hdr_maxsz + \
-				encode_setclientid_confirm_maxsz + \
-				encode_putrootfh_maxsz + \
-				encode_fsinfo_maxsz)
+				encode_setclientid_confirm_maxsz)
 #define NFS4_dec_setclientid_confirm_sz \
 				(compound_decode_hdr_maxsz + \
-				decode_setclientid_confirm_maxsz + \
-				decode_putrootfh_maxsz + \
-				decode_fsinfo_maxsz)
+				decode_setclientid_confirm_maxsz)
 #define NFS4_enc_lock_sz        (compound_encode_hdr_maxsz + \
 				encode_sequence_maxsz + \
 				encode_putfh_maxsz + \
@@ -1058,8 +1054,7 @@
 	if (iap->ia_valid & ATTR_ATIME_SET) {
 		bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
 		*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
-		*p++ = cpu_to_be32(0);
-		*p++ = cpu_to_be32(iap->ia_atime.tv_sec);
+		p = xdr_encode_hyper(p, (s64)iap->ia_atime.tv_sec);
 		*p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
 	}
 	else if (iap->ia_valid & ATTR_ATIME) {
@@ -1069,8 +1064,7 @@
 	if (iap->ia_valid & ATTR_MTIME_SET) {
 		bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
 		*p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
-		*p++ = cpu_to_be32(0);
-		*p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
+		p = xdr_encode_hyper(p, (s64)iap->ia_mtime.tv_sec);
 		*p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
 	}
 	else if (iap->ia_valid & ATTR_MTIME) {
@@ -1366,33 +1360,28 @@
 
 static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
 {
+	struct iattr dummy;
 	__be32 *p;
-	struct nfs_client *clp;
 
 	p = reserve_space(xdr, 4);
-	switch(arg->open_flags & O_EXCL) {
-	case 0:
+	switch(arg->createmode) {
+	case NFS4_CREATE_UNCHECKED:
 		*p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
 		encode_attrs(xdr, arg->u.attrs, arg->server);
 		break;
-	default:
-		clp = arg->server->nfs_client;
-		if (clp->cl_mvops->minor_version > 0) {
-			if (nfs4_has_persistent_session(clp)) {
-				*p = cpu_to_be32(NFS4_CREATE_GUARDED);
-				encode_attrs(xdr, arg->u.attrs, arg->server);
-			} else {
-				struct iattr dummy;
-
-				*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
-				encode_nfs4_verifier(xdr, &arg->u.verifier);
-				dummy.ia_valid = 0;
-				encode_attrs(xdr, &dummy, arg->server);
-			}
-		} else {
-			*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
-			encode_nfs4_verifier(xdr, &arg->u.verifier);
-		}
+	case NFS4_CREATE_GUARDED:
+		*p = cpu_to_be32(NFS4_CREATE_GUARDED);
+		encode_attrs(xdr, arg->u.attrs, arg->server);
+		break;
+	case NFS4_CREATE_EXCLUSIVE:
+		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
+		encode_nfs4_verifier(xdr, &arg->u.verifier);
+		break;
+	case NFS4_CREATE_EXCLUSIVE4_1:
+		*p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
+		encode_nfs4_verifier(xdr, &arg->u.verifier);
+		dummy.ia_valid = 0;
+		encode_attrs(xdr, &dummy, arg->server);
 	}
 }
 
@@ -1459,6 +1448,23 @@
 	encode_string(xdr, name->len, name->name);
 }
 
+static inline void encode_claim_fh(struct xdr_stream *xdr)
+{
+	__be32 *p;
+
+	p = reserve_space(xdr, 4);
+	*p = cpu_to_be32(NFS4_OPEN_CLAIM_FH);
+}
+
+static inline void encode_claim_delegate_cur_fh(struct xdr_stream *xdr, const nfs4_stateid *stateid)
+{
+	__be32 *p;
+
+	p = reserve_space(xdr, 4);
+	*p = cpu_to_be32(NFS4_OPEN_CLAIM_DELEG_CUR_FH);
+	encode_nfs4_stateid(xdr, stateid);
+}
+
 static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr)
 {
 	encode_op_hdr(xdr, OP_OPEN, decode_open_maxsz, hdr);
@@ -1474,6 +1480,12 @@
 	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
 		encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation);
 		break;
+	case NFS4_OPEN_CLAIM_FH:
+		encode_claim_fh(xdr);
+		break;
+	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
+		encode_claim_delegate_cur_fh(xdr, &arg->u.delegation);
+		break;
 	default:
 		BUG();
 	}
@@ -1506,35 +1518,12 @@
 	encode_op_hdr(xdr, OP_PUTROOTFH, decode_putrootfh_maxsz, hdr);
 }
 
-static void encode_open_stateid(struct xdr_stream *xdr,
-		const struct nfs_open_context *ctx,
-		const struct nfs_lock_context *l_ctx,
-		fmode_t fmode,
-		int zero_seqid)
-{
-	nfs4_stateid stateid;
-
-	if (ctx->state != NULL) {
-		const struct nfs_lockowner *lockowner = NULL;
-
-		if (l_ctx != NULL)
-			lockowner = &l_ctx->lockowner;
-		nfs4_select_rw_stateid(&stateid, ctx->state,
-				fmode, lockowner);
-		if (zero_seqid)
-			stateid.seqid = 0;
-		encode_nfs4_stateid(xdr, &stateid);
-	} else
-		encode_nfs4_stateid(xdr, &zero_stateid);
-}
-
 static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr)
 {
 	__be32 *p;
 
 	encode_op_hdr(xdr, OP_READ, decode_read_maxsz, hdr);
-	encode_open_stateid(xdr, args->context, args->lock_context,
-			FMODE_READ, hdr->minorversion);
+	encode_nfs4_stateid(xdr, &args->stateid);
 
 	p = reserve_space(xdr, 12);
 	p = xdr_encode_hyper(p, args->offset);
@@ -1670,8 +1659,7 @@
 	__be32 *p;
 
 	encode_op_hdr(xdr, OP_WRITE, decode_write_maxsz, hdr);
-	encode_open_stateid(xdr, args->context, args->lock_context,
-			FMODE_WRITE, hdr->minorversion);
+	encode_nfs4_stateid(xdr, &args->stateid);
 
 	p = reserve_space(xdr, 16);
 	p = xdr_encode_hyper(p, args->offset);
@@ -2609,12 +2597,9 @@
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
-	const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME };
 
 	encode_compound_hdr(xdr, req, &hdr);
 	encode_setclientid_confirm(xdr, arg, &hdr);
-	encode_putrootfh(xdr, &hdr);
-	encode_fsinfo(xdr, lease_bitmap, &hdr);
 	encode_nops(&hdr);
 }
 
@@ -3497,8 +3482,11 @@
 	if (n == 0)
 		goto root_path;
 	dprintk("pathname4: ");
-	path->ncomponents = 0;
-	while (path->ncomponents < n) {
+	if (n > NFS4_PATHNAME_MAXCOMPONENTS) {
+		dprintk("cannot parse %d components in path\n", n);
+		goto out_eio;
+	}
+	for (path->ncomponents = 0; path->ncomponents < n; path->ncomponents++) {
 		struct nfs4_string *component = &path->components[path->ncomponents];
 		status = decode_opaque_inline(xdr, &component->len, &component->data);
 		if (unlikely(status != 0))
@@ -3507,12 +3495,6 @@
 			pr_cont("%s%.*s ",
 				(path->ncomponents != n ? "/ " : ""),
 				component->len, component->data);
-		if (path->ncomponents < NFS4_PATHNAME_MAXCOMPONENTS)
-			path->ncomponents++;
-		else {
-			dprintk("cannot parse %d components in path\n", n);
-			goto out_eio;
-		}
 	}
 out:
 	return status;
@@ -3557,27 +3539,23 @@
 	n = be32_to_cpup(p);
 	if (n <= 0)
 		goto out_eio;
-	res->nlocations = 0;
-	while (res->nlocations < n) {
+	for (res->nlocations = 0; res->nlocations < n; res->nlocations++) {
 		u32 m;
-		struct nfs4_fs_location *loc = &res->locations[res->nlocations];
+		struct nfs4_fs_location *loc;
 
+		if (res->nlocations == NFS4_FS_LOCATIONS_MAXENTRIES)
+			break;
+		loc = &res->locations[res->nlocations];
 		p = xdr_inline_decode(xdr, 4);
 		if (unlikely(!p))
 			goto out_overflow;
 		m = be32_to_cpup(p);
 
-		loc->nservers = 0;
 		dprintk("%s: servers:\n", __func__);
-		while (loc->nservers < m) {
-			struct nfs4_string *server = &loc->servers[loc->nservers];
-			status = decode_opaque_inline(xdr, &server->len, &server->data);
-			if (unlikely(status != 0))
-				goto out_eio;
-			dprintk("%s ", server->data);
-			if (loc->nservers < NFS4_FS_LOCATION_MAXSERVERS)
-				loc->nservers++;
-			else {
+		for (loc->nservers = 0; loc->nservers < m; loc->nservers++) {
+			struct nfs4_string *server;
+
+			if (loc->nservers == NFS4_FS_LOCATION_MAXSERVERS) {
 				unsigned int i;
 				dprintk("%s: using first %u of %u servers "
 					"returned for location %u\n",
@@ -3591,13 +3569,17 @@
 					if (unlikely(status != 0))
 						goto out_eio;
 				}
+				break;
 			}
+			server = &loc->servers[loc->nservers];
+			status = decode_opaque_inline(xdr, &server->len, &server->data);
+			if (unlikely(status != 0))
+				goto out_eio;
+			dprintk("%s ", server->data);
 		}
 		status = decode_pathname(xdr, &loc->rootpath);
 		if (unlikely(status != 0))
 			goto out_eio;
-		if (res->nlocations < NFS4_FS_LOCATIONS_MAXENTRIES)
-			res->nlocations++;
 	}
 	if (res->nlocations != 0)
 		status = NFS_ATTR_FATTR_V4_LOCATIONS;
@@ -5209,27 +5191,30 @@
 	return decode_op_hdr(xdr, OP_DELEGRETURN);
 }
 
-static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor)
+static int decode_secinfo_gss(struct xdr_stream *xdr,
+			      struct nfs4_secinfo4 *flavor)
 {
+	u32 oid_len;
 	__be32 *p;
 
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(!p))
 		goto out_overflow;
-	flavor->gss.sec_oid4.len = be32_to_cpup(p);
-	if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN)
+	oid_len = be32_to_cpup(p);
+	if (oid_len > GSS_OID_MAX_LEN)
 		goto out_err;
 
-	p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len);
+	p = xdr_inline_decode(xdr, oid_len);
 	if (unlikely(!p))
 		goto out_overflow;
-	memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len);
+	memcpy(flavor->flavor_info.oid.data, p, oid_len);
+	flavor->flavor_info.oid.len = oid_len;
 
 	p = xdr_inline_decode(xdr, 8);
 	if (unlikely(!p))
 		goto out_overflow;
-	flavor->gss.qop4 = be32_to_cpup(p++);
-	flavor->gss.service = be32_to_cpup(p);
+	flavor->flavor_info.qop = be32_to_cpup(p++);
+	flavor->flavor_info.service = be32_to_cpup(p);
 
 	return 0;
 
@@ -5242,10 +5227,10 @@
 
 static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
 {
-	struct nfs4_secinfo_flavor *sec_flavor;
+	struct nfs4_secinfo4 *sec_flavor;
+	unsigned int i, num_flavors;
 	int status;
 	__be32 *p;
-	int i, num_flavors;
 
 	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(!p))
@@ -6648,8 +6633,7 @@
  * Decode SETCLIENTID_CONFIRM response
  */
 static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
-					    struct xdr_stream *xdr,
-					    struct nfs_fsinfo *fsinfo)
+					    struct xdr_stream *xdr)
 {
 	struct compound_hdr hdr;
 	int status;
@@ -6657,10 +6641,6 @@
 	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
 		status = decode_setclientid_confirm(xdr);
-	if (!status)
-		status = decode_putrootfh(xdr);
-	if (!status)
-		status = decode_fsinfo(xdr, fsinfo);
 	return status;
 }
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e56e846..29cfb7a 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -84,6 +84,55 @@
 	kmem_cache_free(nfs_page_cachep, p);
 }
 
+static void
+nfs_iocounter_inc(struct nfs_io_counter *c)
+{
+	atomic_inc(&c->io_count);
+}
+
+static void
+nfs_iocounter_dec(struct nfs_io_counter *c)
+{
+	if (atomic_dec_and_test(&c->io_count)) {
+		clear_bit(NFS_IO_INPROGRESS, &c->flags);
+		smp_mb__after_clear_bit();
+		wake_up_bit(&c->flags, NFS_IO_INPROGRESS);
+	}
+}
+
+static int
+__nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+	wait_queue_head_t *wq = bit_waitqueue(&c->flags, NFS_IO_INPROGRESS);
+	DEFINE_WAIT_BIT(q, &c->flags, NFS_IO_INPROGRESS);
+	int ret = 0;
+
+	do {
+		prepare_to_wait(wq, &q.wait, TASK_KILLABLE);
+		set_bit(NFS_IO_INPROGRESS, &c->flags);
+		if (atomic_read(&c->io_count) == 0)
+			break;
+		ret = nfs_wait_bit_killable(&c->flags);
+	} while (atomic_read(&c->io_count) != 0);
+	finish_wait(wq, &q.wait);
+	return ret;
+}
+
+/**
+ * nfs_iocounter_wait - wait for i/o to complete
+ * @c: nfs_io_counter to use
+ *
+ * returns -ERESTARTSYS if interrupted by a fatal signal.
+ * Otherwise returns 0 once the io_count hits 0.
+ */
+int
+nfs_iocounter_wait(struct nfs_io_counter *c)
+{
+	if (atomic_read(&c->io_count) == 0)
+		return 0;
+	return __nfs_iocounter_wait(c);
+}
+
 /**
  * nfs_create_request - Create an NFS read/write request.
  * @ctx: open context to use
@@ -104,6 +153,8 @@
 	struct nfs_page		*req;
 	struct nfs_lock_context *l_ctx;
 
+	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
+		return ERR_PTR(-EBADF);
 	/* try to allocate the request struct */
 	req = nfs_page_alloc();
 	if (req == NULL)
@@ -116,6 +167,7 @@
 		return ERR_CAST(l_ctx);
 	}
 	req->wb_lock_context = l_ctx;
+	nfs_iocounter_inc(&l_ctx->io_count);
 
 	/* Initialize the request struct. Initially, we assume a
 	 * long write-back delay. This will be adjusted in
@@ -175,6 +227,7 @@
 		req->wb_page = NULL;
 	}
 	if (l_ctx != NULL) {
+		nfs_iocounter_dec(&l_ctx->io_count);
 		nfs_put_lock_context(l_ctx);
 		req->wb_lock_context = NULL;
 	}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 4bdffe0..c5bd758e 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -718,6 +718,8 @@
 	spin_lock(&lo->plh_inode->i_lock);
 	if (pnfs_layoutgets_blocked(lo, 1)) {
 		status = -EAGAIN;
+	} else if (!nfs4_valid_open_stateid(open_state)) {
+		status = -EBADF;
 	} else if (list_empty(&lo->plh_segs)) {
 		int seq;
 
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index a5e5d98..70a26c6 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -514,6 +514,8 @@
 {
 	struct nfs_read_data *data = calldata;
 	NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
+	if (unlikely(test_bit(NFS_CONTEXT_BAD, &data->args.context->flags)))
+		rpc_exit(task, -EIO);
 }
 
 static const struct rpc_call_ops nfs_read_common_ops = {
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 2f8a29d..1bb071d 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -920,7 +920,7 @@
 		data->mount_server.port	= NFS_UNSPEC_PORT;
 		data->nfs_server.port	= NFS_UNSPEC_PORT;
 		data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
-		data->auth_flavors[0]	= RPC_AUTH_UNIX;
+		data->auth_flavors[0]	= RPC_AUTH_MAXFLAVOR;
 		data->auth_flavor_len	= 1;
 		data->minorversion	= 0;
 		data->need_mount	= true;
@@ -1608,49 +1608,57 @@
 }
 
 /*
- * Match the requested auth flavors with the list returned by
- * the server.  Returns zero and sets the mount's authentication
- * flavor on success; returns -EACCES if server does not support
- * the requested flavor.
+ * Select a security flavor for this mount.  The selected flavor
+ * is planted in args->auth_flavors[0].
  */
-static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
-			     struct nfs_mount_request *request)
+static void nfs_select_flavor(struct nfs_parsed_mount_data *args,
+			      struct nfs_mount_request *request)
 {
-	unsigned int i, j, server_authlist_len = *(request->auth_flav_len);
+	unsigned int i, count = *(request->auth_flav_len);
+	rpc_authflavor_t flavor;
+
+	if (args->auth_flavors[0] != RPC_AUTH_MAXFLAVOR)
+		goto out;
+
+	/*
+	 * The NFSv2 MNT operation does not return a flavor list.
+	 */
+	if (args->mount_server.version != NFS_MNT3_VERSION)
+		goto out_default;
 
 	/*
 	 * Certain releases of Linux's mountd return an empty
-	 * flavor list.  To prevent behavioral regression with
-	 * these servers (ie. rejecting mounts that used to
-	 * succeed), revert to pre-2.6.32 behavior (no checking)
-	 * if the returned flavor list is empty.
+	 * flavor list in some cases.
 	 */
-	if (server_authlist_len == 0)
-		return 0;
+	if (count == 0)
+		goto out_default;
 
 	/*
-	 * We avoid sophisticated negotiating here, as there are
-	 * plenty of cases where we can get it wrong, providing
-	 * either too little or too much security.
-	 *
 	 * RFC 2623, section 2.7 suggests we SHOULD prefer the
 	 * flavor listed first.  However, some servers list
-	 * AUTH_NULL first.  Our caller plants AUTH_SYS, the
-	 * preferred default, in args->auth_flavors[0] if user
-	 * didn't specify sec= mount option.
+	 * AUTH_NULL first.  Avoid ever choosing AUTH_NULL.
 	 */
-	for (i = 0; i < args->auth_flavor_len; i++)
-		for (j = 0; j < server_authlist_len; j++)
-			if (args->auth_flavors[i] == request->auth_flavs[j]) {
-				dfprintk(MOUNT, "NFS: using auth flavor %d\n",
-					request->auth_flavs[j]);
-				args->auth_flavors[0] = request->auth_flavs[j];
-				return 0;
-			}
+	for (i = 0; i < count; i++) {
+		struct rpcsec_gss_info info;
 
-	dfprintk(MOUNT, "NFS: server does not support requested auth flavor\n");
-	nfs_umount(request);
-	return -EACCES;
+		flavor = request->auth_flavs[i];
+		switch (flavor) {
+		case RPC_AUTH_UNIX:
+			goto out_set;
+		case RPC_AUTH_NULL:
+			continue;
+		default:
+			if (rpcauth_get_gssinfo(flavor, &info) == 0)
+				goto out_set;
+		}
+	}
+
+out_default:
+	flavor = RPC_AUTH_UNIX;
+out_set:
+	args->auth_flavors[0] = flavor;
+out:
+	dfprintk(MOUNT, "NFS: using auth flavor %d\n", args->auth_flavors[0]);
 }
 
 /*
@@ -1713,12 +1721,8 @@
 		return status;
 	}
 
-	/*
-	 * MNTv1 (NFSv2) does not support auth flavor negotiation.
-	 */
-	if (args->mount_server.version != NFS_MNT3_VERSION)
-		return 0;
-	return nfs_walk_authlist(args, &request);
+	nfs_select_flavor(args, &request);
+	return 0;
 }
 
 struct dentry *nfs_try_mount(int flags, const char *dev_name,
@@ -2381,10 +2385,9 @@
 			  struct nfs_mount_info *mount_info)
 {
 	/* clone any lsm security options from the parent to the new sb */
-	security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
 	if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
 		return -ESTALE;
-	return 0;
+	return security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
 }
 EXPORT_SYMBOL_GPL(nfs_clone_sb_security);
 
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c483cc5..a2c7c28 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1251,6 +1251,8 @@
 {
 	struct nfs_write_data *data = calldata;
 	NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
+	if (unlikely(test_bit(NFS_CONTEXT_BAD, &data->args.context->flags)))
+		rpc_exit(task, -EIO);
 }
 
 void nfs_commit_prepare(struct rpc_task *task, void *calldata)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a272007..2502951 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3138,10 +3138,9 @@
 
 static __be32
 nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp,
-			 __be32 nfserr,struct svc_export *exp)
+			 __be32 nfserr, struct svc_export *exp)
 {
-	int i = 0;
-	u32 nflavs;
+	u32 i, nflavs;
 	struct exp_flavor_info *flavs;
 	struct exp_flavor_info def_flavs[2];
 	__be32 *p;
@@ -3172,30 +3171,29 @@
 	WRITE32(nflavs);
 	ADJUST_ARGS();
 	for (i = 0; i < nflavs; i++) {
-		u32 flav = flavs[i].pseudoflavor;
-		struct gss_api_mech *gm = gss_mech_get_by_pseudoflavor(flav);
+		struct rpcsec_gss_info info;
 
-		if (gm) {
+		if (rpcauth_get_gssinfo(flavs[i].pseudoflavor, &info) == 0) {
 			RESERVE_SPACE(4);
 			WRITE32(RPC_AUTH_GSS);
 			ADJUST_ARGS();
-			RESERVE_SPACE(4 + gm->gm_oid.len);
-			WRITE32(gm->gm_oid.len);
-			WRITEMEM(gm->gm_oid.data, gm->gm_oid.len);
+			RESERVE_SPACE(4 + info.oid.len);
+			WRITE32(info.oid.len);
+			WRITEMEM(info.oid.data, info.oid.len);
 			ADJUST_ARGS();
 			RESERVE_SPACE(4);
-			WRITE32(0); /* qop */
+			WRITE32(info.qop);
 			ADJUST_ARGS();
 			RESERVE_SPACE(4);
-			WRITE32(gss_pseudoflavor_to_service(gm, flav));
+			WRITE32(info.service);
 			ADJUST_ARGS();
-			gss_mech_put(gm);
 		} else {
 			RESERVE_SPACE(4);
-			WRITE32(flav);
+			WRITE32(flavs[i].pseudoflavor);
 			ADJUST_ARGS();
 		}
 	}
+
 out:
 	if (exp)
 		exp_put(exp);
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 9bf59d0..cf051e0 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -44,17 +44,50 @@
 #ifndef __ACEXCEP_H__
 #define __ACEXCEP_H__
 
+/* This module contains all possible exception codes for acpi_status */
+
 /*
- * Exceptions returned by external ACPI interfaces
+ * Exception code classes
  */
-#define AE_CODE_ENVIRONMENTAL           0x0000
-#define AE_CODE_PROGRAMMER              0x1000
-#define AE_CODE_ACPI_TABLES             0x2000
-#define AE_CODE_AML                     0x3000
-#define AE_CODE_CONTROL                 0x4000
+#define AE_CODE_ENVIRONMENTAL           0x0000	/* General ACPICA environment */
+#define AE_CODE_PROGRAMMER              0x1000	/* External ACPICA interface caller */
+#define AE_CODE_ACPI_TABLES             0x2000	/* ACPI tables */
+#define AE_CODE_AML                     0x3000	/* From executing AML code */
+#define AE_CODE_CONTROL                 0x4000	/* Internal control codes */
+
 #define AE_CODE_MAX                     0x4000
 #define AE_CODE_MASK                    0xF000
 
+/*
+ * Macros to insert the exception code classes
+ */
+#define EXCEP_ENV(code)                 ((acpi_status) (code | AE_CODE_ENVIRONMENTAL))
+#define EXCEP_PGM(code)                 ((acpi_status) (code | AE_CODE_PROGRAMMER))
+#define EXCEP_TBL(code)                 ((acpi_status) (code | AE_CODE_ACPI_TABLES))
+#define EXCEP_AML(code)                 ((acpi_status) (code | AE_CODE_AML))
+#define EXCEP_CTL(code)                 ((acpi_status) (code | AE_CODE_CONTROL))
+
+/*
+ * Exception info table. The "Description" field is used only by the
+ * ACPICA help application (acpihelp).
+ */
+struct acpi_exception_info {
+	char *name;
+
+#ifdef ACPI_HELP_APP
+	char *description;
+#endif
+};
+
+#ifdef ACPI_HELP_APP
+#define EXCEP_TXT(name,description)     {name, description}
+#else
+#define EXCEP_TXT(name,description)     {name}
+#endif
+
+/*
+ * Success is always zero, failure is non-zero
+ */
 #define ACPI_SUCCESS(a)                 (!(a))
 #define ACPI_FAILURE(a)                 (a)
 
@@ -64,60 +97,60 @@
 /*
  * Environmental exceptions
  */
-#define AE_ERROR                        (acpi_status) (0x0001 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_ACPI_TABLES               (acpi_status) (0x0002 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_NAMESPACE                 (acpi_status) (0x0003 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_MEMORY                    (acpi_status) (0x0004 | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_FOUND                    (acpi_status) (0x0005 | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_EXIST                    (acpi_status) (0x0006 | AE_CODE_ENVIRONMENTAL)
-#define AE_ALREADY_EXISTS               (acpi_status) (0x0007 | AE_CODE_ENVIRONMENTAL)
-#define AE_TYPE                         (acpi_status) (0x0008 | AE_CODE_ENVIRONMENTAL)
-#define AE_NULL_OBJECT                  (acpi_status) (0x0009 | AE_CODE_ENVIRONMENTAL)
-#define AE_NULL_ENTRY                   (acpi_status) (0x000A | AE_CODE_ENVIRONMENTAL)
-#define AE_BUFFER_OVERFLOW              (acpi_status) (0x000B | AE_CODE_ENVIRONMENTAL)
-#define AE_STACK_OVERFLOW               (acpi_status) (0x000C | AE_CODE_ENVIRONMENTAL)
-#define AE_STACK_UNDERFLOW              (acpi_status) (0x000D | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_IMPLEMENTED              (acpi_status) (0x000E | AE_CODE_ENVIRONMENTAL)
-#define AE_SUPPORT                      (acpi_status) (0x000F | AE_CODE_ENVIRONMENTAL)
-#define AE_LIMIT                        (acpi_status) (0x0010 | AE_CODE_ENVIRONMENTAL)
-#define AE_TIME                         (acpi_status) (0x0011 | AE_CODE_ENVIRONMENTAL)
-#define AE_ACQUIRE_DEADLOCK             (acpi_status) (0x0012 | AE_CODE_ENVIRONMENTAL)
-#define AE_RELEASE_DEADLOCK             (acpi_status) (0x0013 | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_ACQUIRED                 (acpi_status) (0x0014 | AE_CODE_ENVIRONMENTAL)
-#define AE_ALREADY_ACQUIRED             (acpi_status) (0x0015 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_HARDWARE_RESPONSE         (acpi_status) (0x0016 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_GLOBAL_LOCK               (acpi_status) (0x0017 | AE_CODE_ENVIRONMENTAL)
-#define AE_ABORT_METHOD                 (acpi_status) (0x0018 | AE_CODE_ENVIRONMENTAL)
-#define AE_SAME_HANDLER                 (acpi_status) (0x0019 | AE_CODE_ENVIRONMENTAL)
-#define AE_NO_HANDLER                   (acpi_status) (0x001A | AE_CODE_ENVIRONMENTAL)
-#define AE_OWNER_ID_LIMIT               (acpi_status) (0x001B | AE_CODE_ENVIRONMENTAL)
-#define AE_NOT_CONFIGURED               (acpi_status) (0x001C | AE_CODE_ENVIRONMENTAL)
+#define AE_ERROR                        EXCEP_ENV (0x0001)
+#define AE_NO_ACPI_TABLES               EXCEP_ENV (0x0002)
+#define AE_NO_NAMESPACE                 EXCEP_ENV (0x0003)
+#define AE_NO_MEMORY                    EXCEP_ENV (0x0004)
+#define AE_NOT_FOUND                    EXCEP_ENV (0x0005)
+#define AE_NOT_EXIST                    EXCEP_ENV (0x0006)
+#define AE_ALREADY_EXISTS               EXCEP_ENV (0x0007)
+#define AE_TYPE                         EXCEP_ENV (0x0008)
+#define AE_NULL_OBJECT                  EXCEP_ENV (0x0009)
+#define AE_NULL_ENTRY                   EXCEP_ENV (0x000A)
+#define AE_BUFFER_OVERFLOW              EXCEP_ENV (0x000B)
+#define AE_STACK_OVERFLOW               EXCEP_ENV (0x000C)
+#define AE_STACK_UNDERFLOW              EXCEP_ENV (0x000D)
+#define AE_NOT_IMPLEMENTED              EXCEP_ENV (0x000E)
+#define AE_SUPPORT                      EXCEP_ENV (0x000F)
+#define AE_LIMIT                        EXCEP_ENV (0x0010)
+#define AE_TIME                         EXCEP_ENV (0x0011)
+#define AE_ACQUIRE_DEADLOCK             EXCEP_ENV (0x0012)
+#define AE_RELEASE_DEADLOCK             EXCEP_ENV (0x0013)
+#define AE_NOT_ACQUIRED                 EXCEP_ENV (0x0014)
+#define AE_ALREADY_ACQUIRED             EXCEP_ENV (0x0015)
+#define AE_NO_HARDWARE_RESPONSE         EXCEP_ENV (0x0016)
+#define AE_NO_GLOBAL_LOCK               EXCEP_ENV (0x0017)
+#define AE_ABORT_METHOD                 EXCEP_ENV (0x0018)
+#define AE_SAME_HANDLER                 EXCEP_ENV (0x0019)
+#define AE_NO_HANDLER                   EXCEP_ENV (0x001A)
+#define AE_OWNER_ID_LIMIT               EXCEP_ENV (0x001B)
+#define AE_NOT_CONFIGURED               EXCEP_ENV (0x001C)
 
 #define AE_CODE_ENV_MAX                 0x001C
 
 /*
  * Programmer exceptions
  */
-#define AE_BAD_PARAMETER                (acpi_status) (0x0001 | AE_CODE_PROGRAMMER)
-#define AE_BAD_CHARACTER                (acpi_status) (0x0002 | AE_CODE_PROGRAMMER)
-#define AE_BAD_PATHNAME                 (acpi_status) (0x0003 | AE_CODE_PROGRAMMER)
-#define AE_BAD_DATA                     (acpi_status) (0x0004 | AE_CODE_PROGRAMMER)
-#define AE_BAD_HEX_CONSTANT             (acpi_status) (0x0005 | AE_CODE_PROGRAMMER)
-#define AE_BAD_OCTAL_CONSTANT           (acpi_status) (0x0006 | AE_CODE_PROGRAMMER)
-#define AE_BAD_DECIMAL_CONSTANT         (acpi_status) (0x0007 | AE_CODE_PROGRAMMER)
-#define AE_MISSING_ARGUMENTS            (acpi_status) (0x0008 | AE_CODE_PROGRAMMER)
-#define AE_BAD_ADDRESS                  (acpi_status) (0x0009 | AE_CODE_PROGRAMMER)
+#define AE_BAD_PARAMETER                EXCEP_PGM (0x0001)
+#define AE_BAD_CHARACTER                EXCEP_PGM (0x0002)
+#define AE_BAD_PATHNAME                 EXCEP_PGM (0x0003)
+#define AE_BAD_DATA                     EXCEP_PGM (0x0004)
+#define AE_BAD_HEX_CONSTANT             EXCEP_PGM (0x0005)
+#define AE_BAD_OCTAL_CONSTANT           EXCEP_PGM (0x0006)
+#define AE_BAD_DECIMAL_CONSTANT         EXCEP_PGM (0x0007)
+#define AE_MISSING_ARGUMENTS            EXCEP_PGM (0x0008)
+#define AE_BAD_ADDRESS                  EXCEP_PGM (0x0009)
 
 #define AE_CODE_PGM_MAX                 0x0009
 
 /*
  * Acpi table exceptions
  */
-#define AE_BAD_SIGNATURE                (acpi_status) (0x0001 | AE_CODE_ACPI_TABLES)
-#define AE_BAD_HEADER                   (acpi_status) (0x0002 | AE_CODE_ACPI_TABLES)
-#define AE_BAD_CHECKSUM                 (acpi_status) (0x0003 | AE_CODE_ACPI_TABLES)
-#define AE_BAD_VALUE                    (acpi_status) (0x0004 | AE_CODE_ACPI_TABLES)
-#define AE_INVALID_TABLE_LENGTH         (acpi_status) (0x0005 | AE_CODE_ACPI_TABLES)
+#define AE_BAD_SIGNATURE                EXCEP_TBL (0x0001)
+#define AE_BAD_HEADER                   EXCEP_TBL (0x0002)
+#define AE_BAD_CHECKSUM                 EXCEP_TBL (0x0003)
+#define AE_BAD_VALUE                    EXCEP_TBL (0x0004)
+#define AE_INVALID_TABLE_LENGTH         EXCEP_TBL (0x0005)
 
 #define AE_CODE_TBL_MAX                 0x0005
 
@@ -125,58 +158,58 @@
  * AML exceptions. These are caused by problems with
  * the actual AML byte stream
  */
-#define AE_AML_BAD_OPCODE               (acpi_status) (0x0001 | AE_CODE_AML)
-#define AE_AML_NO_OPERAND               (acpi_status) (0x0002 | AE_CODE_AML)
-#define AE_AML_OPERAND_TYPE             (acpi_status) (0x0003 | AE_CODE_AML)
-#define AE_AML_OPERAND_VALUE            (acpi_status) (0x0004 | AE_CODE_AML)
-#define AE_AML_UNINITIALIZED_LOCAL      (acpi_status) (0x0005 | AE_CODE_AML)
-#define AE_AML_UNINITIALIZED_ARG        (acpi_status) (0x0006 | AE_CODE_AML)
-#define AE_AML_UNINITIALIZED_ELEMENT    (acpi_status) (0x0007 | AE_CODE_AML)
-#define AE_AML_NUMERIC_OVERFLOW         (acpi_status) (0x0008 | AE_CODE_AML)
-#define AE_AML_REGION_LIMIT             (acpi_status) (0x0009 | AE_CODE_AML)
-#define AE_AML_BUFFER_LIMIT             (acpi_status) (0x000A | AE_CODE_AML)
-#define AE_AML_PACKAGE_LIMIT            (acpi_status) (0x000B | AE_CODE_AML)
-#define AE_AML_DIVIDE_BY_ZERO           (acpi_status) (0x000C | AE_CODE_AML)
-#define AE_AML_BAD_NAME                 (acpi_status) (0x000D | AE_CODE_AML)
-#define AE_AML_NAME_NOT_FOUND           (acpi_status) (0x000E | AE_CODE_AML)
-#define AE_AML_INTERNAL                 (acpi_status) (0x000F | AE_CODE_AML)
-#define AE_AML_INVALID_SPACE_ID         (acpi_status) (0x0010 | AE_CODE_AML)
-#define AE_AML_STRING_LIMIT             (acpi_status) (0x0011 | AE_CODE_AML)
-#define AE_AML_NO_RETURN_VALUE          (acpi_status) (0x0012 | AE_CODE_AML)
-#define AE_AML_METHOD_LIMIT             (acpi_status) (0x0013 | AE_CODE_AML)
-#define AE_AML_NOT_OWNER                (acpi_status) (0x0014 | AE_CODE_AML)
-#define AE_AML_MUTEX_ORDER              (acpi_status) (0x0015 | AE_CODE_AML)
-#define AE_AML_MUTEX_NOT_ACQUIRED       (acpi_status) (0x0016 | AE_CODE_AML)
-#define AE_AML_INVALID_RESOURCE_TYPE    (acpi_status) (0x0017 | AE_CODE_AML)
-#define AE_AML_INVALID_INDEX            (acpi_status) (0x0018 | AE_CODE_AML)
-#define AE_AML_REGISTER_LIMIT           (acpi_status) (0x0019 | AE_CODE_AML)
-#define AE_AML_NO_WHILE                 (acpi_status) (0x001A | AE_CODE_AML)
-#define AE_AML_ALIGNMENT                (acpi_status) (0x001B | AE_CODE_AML)
-#define AE_AML_NO_RESOURCE_END_TAG      (acpi_status) (0x001C | AE_CODE_AML)
-#define AE_AML_BAD_RESOURCE_VALUE       (acpi_status) (0x001D | AE_CODE_AML)
-#define AE_AML_CIRCULAR_REFERENCE       (acpi_status) (0x001E | AE_CODE_AML)
-#define AE_AML_BAD_RESOURCE_LENGTH      (acpi_status) (0x001F | AE_CODE_AML)
-#define AE_AML_ILLEGAL_ADDRESS          (acpi_status) (0x0020 | AE_CODE_AML)
-#define AE_AML_INFINITE_LOOP            (acpi_status) (0x0021 | AE_CODE_AML)
+#define AE_AML_BAD_OPCODE               EXCEP_AML (0x0001)
+#define AE_AML_NO_OPERAND               EXCEP_AML (0x0002)
+#define AE_AML_OPERAND_TYPE             EXCEP_AML (0x0003)
+#define AE_AML_OPERAND_VALUE            EXCEP_AML (0x0004)
+#define AE_AML_UNINITIALIZED_LOCAL      EXCEP_AML (0x0005)
+#define AE_AML_UNINITIALIZED_ARG        EXCEP_AML (0x0006)
+#define AE_AML_UNINITIALIZED_ELEMENT    EXCEP_AML (0x0007)
+#define AE_AML_NUMERIC_OVERFLOW         EXCEP_AML (0x0008)
+#define AE_AML_REGION_LIMIT             EXCEP_AML (0x0009)
+#define AE_AML_BUFFER_LIMIT             EXCEP_AML (0x000A)
+#define AE_AML_PACKAGE_LIMIT            EXCEP_AML (0x000B)
+#define AE_AML_DIVIDE_BY_ZERO           EXCEP_AML (0x000C)
+#define AE_AML_BAD_NAME                 EXCEP_AML (0x000D)
+#define AE_AML_NAME_NOT_FOUND           EXCEP_AML (0x000E)
+#define AE_AML_INTERNAL                 EXCEP_AML (0x000F)
+#define AE_AML_INVALID_SPACE_ID         EXCEP_AML (0x0010)
+#define AE_AML_STRING_LIMIT             EXCEP_AML (0x0011)
+#define AE_AML_NO_RETURN_VALUE          EXCEP_AML (0x0012)
+#define AE_AML_METHOD_LIMIT             EXCEP_AML (0x0013)
+#define AE_AML_NOT_OWNER                EXCEP_AML (0x0014)
+#define AE_AML_MUTEX_ORDER              EXCEP_AML (0x0015)
+#define AE_AML_MUTEX_NOT_ACQUIRED       EXCEP_AML (0x0016)
+#define AE_AML_INVALID_RESOURCE_TYPE    EXCEP_AML (0x0017)
+#define AE_AML_INVALID_INDEX            EXCEP_AML (0x0018)
+#define AE_AML_REGISTER_LIMIT           EXCEP_AML (0x0019)
+#define AE_AML_NO_WHILE                 EXCEP_AML (0x001A)
+#define AE_AML_ALIGNMENT                EXCEP_AML (0x001B)
+#define AE_AML_NO_RESOURCE_END_TAG      EXCEP_AML (0x001C)
+#define AE_AML_BAD_RESOURCE_VALUE       EXCEP_AML (0x001D)
+#define AE_AML_CIRCULAR_REFERENCE       EXCEP_AML (0x001E)
+#define AE_AML_BAD_RESOURCE_LENGTH      EXCEP_AML (0x001F)
+#define AE_AML_ILLEGAL_ADDRESS          EXCEP_AML (0x0020)
+#define AE_AML_INFINITE_LOOP            EXCEP_AML (0x0021)
 
 #define AE_CODE_AML_MAX                 0x0021
 
 /*
  * Internal exceptions used for control
  */
-#define AE_CTRL_RETURN_VALUE            (acpi_status) (0x0001 | AE_CODE_CONTROL)
-#define AE_CTRL_PENDING                 (acpi_status) (0x0002 | AE_CODE_CONTROL)
-#define AE_CTRL_TERMINATE               (acpi_status) (0x0003 | AE_CODE_CONTROL)
-#define AE_CTRL_TRUE                    (acpi_status) (0x0004 | AE_CODE_CONTROL)
-#define AE_CTRL_FALSE                   (acpi_status) (0x0005 | AE_CODE_CONTROL)
-#define AE_CTRL_DEPTH                   (acpi_status) (0x0006 | AE_CODE_CONTROL)
-#define AE_CTRL_END                     (acpi_status) (0x0007 | AE_CODE_CONTROL)
-#define AE_CTRL_TRANSFER                (acpi_status) (0x0008 | AE_CODE_CONTROL)
-#define AE_CTRL_BREAK                   (acpi_status) (0x0009 | AE_CODE_CONTROL)
-#define AE_CTRL_CONTINUE                (acpi_status) (0x000A | AE_CODE_CONTROL)
-#define AE_CTRL_SKIP                    (acpi_status) (0x000B | AE_CODE_CONTROL)
-#define AE_CTRL_PARSE_CONTINUE          (acpi_status) (0x000C | AE_CODE_CONTROL)
-#define AE_CTRL_PARSE_PENDING           (acpi_status) (0x000D | AE_CODE_CONTROL)
+#define AE_CTRL_RETURN_VALUE            EXCEP_CTL (0x0001)
+#define AE_CTRL_PENDING                 EXCEP_CTL (0x0002)
+#define AE_CTRL_TERMINATE               EXCEP_CTL (0x0003)
+#define AE_CTRL_TRUE                    EXCEP_CTL (0x0004)
+#define AE_CTRL_FALSE                   EXCEP_CTL (0x0005)
+#define AE_CTRL_DEPTH                   EXCEP_CTL (0x0006)
+#define AE_CTRL_END                     EXCEP_CTL (0x0007)
+#define AE_CTRL_TRANSFER                EXCEP_CTL (0x0008)
+#define AE_CTRL_BREAK                   EXCEP_CTL (0x0009)
+#define AE_CTRL_CONTINUE                EXCEP_CTL (0x000A)
+#define AE_CTRL_SKIP                    EXCEP_CTL (0x000B)
+#define AE_CTRL_PARSE_CONTINUE          EXCEP_CTL (0x000C)
+#define AE_CTRL_PARSE_PENDING           EXCEP_CTL (0x000D)
 
 #define AE_CODE_CTRL_MAX                0x000D
 
@@ -188,112 +221,156 @@
  * String versions of the exception codes above
  * These strings must match the corresponding defines exactly
  */
-char const *acpi_gbl_exception_names_env[] = {
-	"AE_OK",
-	"AE_ERROR",
-	"AE_NO_ACPI_TABLES",
-	"AE_NO_NAMESPACE",
-	"AE_NO_MEMORY",
-	"AE_NOT_FOUND",
-	"AE_NOT_EXIST",
-	"AE_ALREADY_EXISTS",
-	"AE_TYPE",
-	"AE_NULL_OBJECT",
-	"AE_NULL_ENTRY",
-	"AE_BUFFER_OVERFLOW",
-	"AE_STACK_OVERFLOW",
-	"AE_STACK_UNDERFLOW",
-	"AE_NOT_IMPLEMENTED",
-	"AE_SUPPORT",
-	"AE_LIMIT",
-	"AE_TIME",
-	"AE_ACQUIRE_DEADLOCK",
-	"AE_RELEASE_DEADLOCK",
-	"AE_NOT_ACQUIRED",
-	"AE_ALREADY_ACQUIRED",
-	"AE_NO_HARDWARE_RESPONSE",
-	"AE_NO_GLOBAL_LOCK",
-	"AE_ABORT_METHOD",
-	"AE_SAME_HANDLER",
-	"AE_NO_HANDLER",
-	"AE_OWNER_ID_LIMIT",
-	"AE_NOT_CONFIGURED"
+static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
+	EXCEP_TXT("AE_OK", "No error"),
+	EXCEP_TXT("AE_ERROR", "Unspecified error"),
+	EXCEP_TXT("AE_NO_ACPI_TABLES", "ACPI tables could not be found"),
+	EXCEP_TXT("AE_NO_NAMESPACE", "A namespace has not been loaded"),
+	EXCEP_TXT("AE_NO_MEMORY", "Insufficient dynamic memory"),
+	EXCEP_TXT("AE_NOT_FOUND", "The name was not found in the namespace"),
+	EXCEP_TXT("AE_NOT_EXIST", "A required entity does not exist"),
+	EXCEP_TXT("AE_ALREADY_EXISTS", "An entity already exists"),
+	EXCEP_TXT("AE_TYPE", "The object type is incorrect"),
+	EXCEP_TXT("AE_NULL_OBJECT", "A required object was missing"),
+	EXCEP_TXT("AE_NULL_ENTRY", "The requested object does not exist"),
+	EXCEP_TXT("AE_BUFFER_OVERFLOW", "The buffer provided is too small"),
+	EXCEP_TXT("AE_STACK_OVERFLOW", "An internal stack overflowed"),
+	EXCEP_TXT("AE_STACK_UNDERFLOW", "An internal stack underflowed"),
+	EXCEP_TXT("AE_NOT_IMPLEMENTED", "The feature is not implemented"),
+	EXCEP_TXT("AE_SUPPORT", "The feature is not supported"),
+	EXCEP_TXT("AE_LIMIT", "A predefined limit was exceeded"),
+	EXCEP_TXT("AE_TIME", "A time limit or timeout expired"),
+	EXCEP_TXT("AE_ACQUIRE_DEADLOCK",
+		  "Internal error, attempt was made to acquire a mutex in improper order"),
+	EXCEP_TXT("AE_RELEASE_DEADLOCK",
+		  "Internal error, attempt was made to release a mutex in improper order"),
+	EXCEP_TXT("AE_NOT_ACQUIRED",
+		  "An attempt to release a mutex or Global Lock without a previous acquire"),
+	EXCEP_TXT("AE_ALREADY_ACQUIRED",
+		  "Internal error, attempt was made to acquire a mutex twice"),
+	EXCEP_TXT("AE_NO_HARDWARE_RESPONSE",
+		  "Hardware did not respond after an I/O operation"),
+	EXCEP_TXT("AE_NO_GLOBAL_LOCK", "There is no FACS Global Lock"),
+	EXCEP_TXT("AE_ABORT_METHOD", "A control method was aborted"),
+	EXCEP_TXT("AE_SAME_HANDLER",
+		  "Attempt was made to install the same handler that is already installed"),
+	EXCEP_TXT("AE_NO_HANDLER",
+		  "A handler for the operation is not installed"),
+	EXCEP_TXT("AE_OWNER_ID_LIMIT",
+		  "There are no more Owner IDs available for ACPI tables or control methods"),
+	EXCEP_TXT("AE_NOT_CONFIGURED",
+		  "The interface is not part of the current subsystem configuration")
 };
 
-char const *acpi_gbl_exception_names_pgm[] = {
-	NULL,
-	"AE_BAD_PARAMETER",
-	"AE_BAD_CHARACTER",
-	"AE_BAD_PATHNAME",
-	"AE_BAD_DATA",
-	"AE_BAD_HEX_CONSTANT",
-	"AE_BAD_OCTAL_CONSTANT",
-	"AE_BAD_DECIMAL_CONSTANT",
-	"AE_MISSING_ARGUMENTS",
-	"AE_BAD_ADDRESS"
+static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
+	EXCEP_TXT(NULL, NULL),
+	EXCEP_TXT("AE_BAD_PARAMETER", "A parameter is out of range or invalid"),
+	EXCEP_TXT("AE_BAD_CHARACTER",
+		  "An invalid character was found in a name"),
+	EXCEP_TXT("AE_BAD_PATHNAME",
+		  "An invalid character was found in a pathname"),
+	EXCEP_TXT("AE_BAD_DATA",
+		  "A package or buffer contained incorrect data"),
+	EXCEP_TXT("AE_BAD_HEX_CONSTANT", "Invalid character in a Hex constant"),
+	EXCEP_TXT("AE_BAD_OCTAL_CONSTANT",
+		  "Invalid character in an Octal constant"),
+	EXCEP_TXT("AE_BAD_DECIMAL_CONSTANT",
+		  "Invalid character in a Decimal constant"),
+	EXCEP_TXT("AE_MISSING_ARGUMENTS",
+		  "Too few arguments were passed to a control method"),
+	EXCEP_TXT("AE_BAD_ADDRESS", "An illegal null I/O address")
 };
 
-char const *acpi_gbl_exception_names_tbl[] = {
-	NULL,
-	"AE_BAD_SIGNATURE",
-	"AE_BAD_HEADER",
-	"AE_BAD_CHECKSUM",
-	"AE_BAD_VALUE",
-	"AE_INVALID_TABLE_LENGTH"
+static const struct acpi_exception_info acpi_gbl_exception_names_tbl[] = {
+	EXCEP_TXT(NULL, NULL),
+	EXCEP_TXT("AE_BAD_SIGNATURE", "An ACPI table has an invalid signature"),
+	EXCEP_TXT("AE_BAD_HEADER", "Invalid field in an ACPI table header"),
+	EXCEP_TXT("AE_BAD_CHECKSUM", "An ACPI table checksum is not correct"),
+	EXCEP_TXT("AE_BAD_VALUE", "An invalid value was found in a table"),
+	EXCEP_TXT("AE_INVALID_TABLE_LENGTH",
+		  "The FADT or FACS has improper length")
 };
 
-char const *acpi_gbl_exception_names_aml[] = {
-	NULL,
-	"AE_AML_BAD_OPCODE",
-	"AE_AML_NO_OPERAND",
-	"AE_AML_OPERAND_TYPE",
-	"AE_AML_OPERAND_VALUE",
-	"AE_AML_UNINITIALIZED_LOCAL",
-	"AE_AML_UNINITIALIZED_ARG",
-	"AE_AML_UNINITIALIZED_ELEMENT",
-	"AE_AML_NUMERIC_OVERFLOW",
-	"AE_AML_REGION_LIMIT",
-	"AE_AML_BUFFER_LIMIT",
-	"AE_AML_PACKAGE_LIMIT",
-	"AE_AML_DIVIDE_BY_ZERO",
-	"AE_AML_BAD_NAME",
-	"AE_AML_NAME_NOT_FOUND",
-	"AE_AML_INTERNAL",
-	"AE_AML_INVALID_SPACE_ID",
-	"AE_AML_STRING_LIMIT",
-	"AE_AML_NO_RETURN_VALUE",
-	"AE_AML_METHOD_LIMIT",
-	"AE_AML_NOT_OWNER",
-	"AE_AML_MUTEX_ORDER",
-	"AE_AML_MUTEX_NOT_ACQUIRED",
-	"AE_AML_INVALID_RESOURCE_TYPE",
-	"AE_AML_INVALID_INDEX",
-	"AE_AML_REGISTER_LIMIT",
-	"AE_AML_NO_WHILE",
-	"AE_AML_ALIGNMENT",
-	"AE_AML_NO_RESOURCE_END_TAG",
-	"AE_AML_BAD_RESOURCE_VALUE",
-	"AE_AML_CIRCULAR_REFERENCE",
-	"AE_AML_BAD_RESOURCE_LENGTH",
-	"AE_AML_ILLEGAL_ADDRESS",
-	"AE_AML_INFINITE_LOOP"
+static const struct acpi_exception_info acpi_gbl_exception_names_aml[] = {
+	EXCEP_TXT(NULL, NULL),
+	EXCEP_TXT("AE_AML_BAD_OPCODE", "Invalid AML opcode encountered"),
+	EXCEP_TXT("AE_AML_NO_OPERAND", "A required operand is missing"),
+	EXCEP_TXT("AE_AML_OPERAND_TYPE",
+		  "An operand of an incorrect type was encountered"),
+	EXCEP_TXT("AE_AML_OPERAND_VALUE",
+		  "The operand had an inappropriate or invalid value"),
+	EXCEP_TXT("AE_AML_UNINITIALIZED_LOCAL",
+		  "Method tried to use an uninitialized local variable"),
+	EXCEP_TXT("AE_AML_UNINITIALIZED_ARG",
+		  "Method tried to use an uninitialized argument"),
+	EXCEP_TXT("AE_AML_UNINITIALIZED_ELEMENT",
+		  "Method tried to use an empty package element"),
+	EXCEP_TXT("AE_AML_NUMERIC_OVERFLOW",
+		  "Overflow during BCD conversion or other"),
+	EXCEP_TXT("AE_AML_REGION_LIMIT",
+		  "Tried to access beyond the end of an Operation Region"),
+	EXCEP_TXT("AE_AML_BUFFER_LIMIT",
+		  "Tried to access beyond the end of a buffer"),
+	EXCEP_TXT("AE_AML_PACKAGE_LIMIT",
+		  "Tried to access beyond the end of a package"),
+	EXCEP_TXT("AE_AML_DIVIDE_BY_ZERO",
+		  "During execution of AML Divide operator"),
+	EXCEP_TXT("AE_AML_BAD_NAME",
+		  "An ACPI name contains invalid character(s)"),
+	EXCEP_TXT("AE_AML_NAME_NOT_FOUND",
+		  "Could not resolve a named reference"),
+	EXCEP_TXT("AE_AML_INTERNAL", "An internal error within the interprete"),
+	EXCEP_TXT("AE_AML_INVALID_SPACE_ID",
+		  "An Operation Region SpaceID is invalid"),
+	EXCEP_TXT("AE_AML_STRING_LIMIT",
+		  "String is longer than 200 characters"),
+	EXCEP_TXT("AE_AML_NO_RETURN_VALUE",
+		  "A method did not return a required value"),
+	EXCEP_TXT("AE_AML_METHOD_LIMIT",
+		  "A control method reached the maximum reentrancy limit of 255"),
+	EXCEP_TXT("AE_AML_NOT_OWNER",
+		  "A thread tried to release a mutex that it does not own"),
+	EXCEP_TXT("AE_AML_MUTEX_ORDER", "Mutex SyncLevel release mismatch"),
+	EXCEP_TXT("AE_AML_MUTEX_NOT_ACQUIRED",
+		  "Attempt to release a mutex that was not previously acquired"),
+	EXCEP_TXT("AE_AML_INVALID_RESOURCE_TYPE",
+		  "Invalid resource type in resource list"),
+	EXCEP_TXT("AE_AML_INVALID_INDEX",
+		  "Invalid Argx or Localx (x too large)"),
+	EXCEP_TXT("AE_AML_REGISTER_LIMIT",
+		  "Bank value or Index value beyond range of register"),
+	EXCEP_TXT("AE_AML_NO_WHILE", "Break or Continue without a While"),
+	EXCEP_TXT("AE_AML_ALIGNMENT",
+		  "Non-aligned memory transfer on platform that does not support this"),
+	EXCEP_TXT("AE_AML_NO_RESOURCE_END_TAG",
+		  "No End Tag in a resource list"),
+	EXCEP_TXT("AE_AML_BAD_RESOURCE_VALUE",
+		  "Invalid value of a resource element"),
+	EXCEP_TXT("AE_AML_CIRCULAR_REFERENCE",
+		  "Two references refer to each other"),
+	EXCEP_TXT("AE_AML_BAD_RESOURCE_LENGTH",
+		  "The length of a Resource Descriptor in the AML is incorrect"),
+	EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
+		  "A memory, I/O, or PCI configuration address is invalid"),
+	EXCEP_TXT("AE_AML_INFINITE_LOOP",
+		  "An apparent infinite AML While loop, method was aborted")
 };
 
-char const *acpi_gbl_exception_names_ctrl[] = {
-	NULL,
-	"AE_CTRL_RETURN_VALUE",
-	"AE_CTRL_PENDING",
-	"AE_CTRL_TERMINATE",
-	"AE_CTRL_TRUE",
-	"AE_CTRL_FALSE",
-	"AE_CTRL_DEPTH",
-	"AE_CTRL_END",
-	"AE_CTRL_TRANSFER",
-	"AE_CTRL_BREAK",
-	"AE_CTRL_CONTINUE",
-	"AE_CTRL_SKIP",
-	"AE_CTRL_PARSE_CONTINUE",
-	"AE_CTRL_PARSE_PENDING"
+static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
+	EXCEP_TXT(NULL, NULL),
+	EXCEP_TXT("AE_CTRL_RETURN_VALUE", "A Method returned a value"),
+	EXCEP_TXT("AE_CTRL_PENDING", "Method is calling another method"),
+	EXCEP_TXT("AE_CTRL_TERMINATE", "Terminate the executing method"),
+	EXCEP_TXT("AE_CTRL_TRUE", "An If or While predicate result"),
+	EXCEP_TXT("AE_CTRL_FALSE", "An If or While predicate result"),
+	EXCEP_TXT("AE_CTRL_DEPTH", "Maximum search depth has been reached"),
+	EXCEP_TXT("AE_CTRL_END", "An If or While predicate is false"),
+	EXCEP_TXT("AE_CTRL_TRANSFER", "Transfer control to called method"),
+	EXCEP_TXT("AE_CTRL_BREAK", "A Break has been executed"),
+	EXCEP_TXT("AE_CTRL_CONTINUE", "A Continue has been executed"),
+	EXCEP_TXT("AE_CTRL_SKIP", "Not currently used"),
+	EXCEP_TXT("AE_CTRL_PARSE_CONTINUE", "Used to skip over bad opcodes"),
+	EXCEP_TXT("AE_CTRL_PARSE_PENDING", "Used to implement AML While loops")
 };
 
 #endif				/* EXCEPTION_TABLE */
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 9885276..4f52ea7 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -324,9 +324,9 @@
 
 /* Helper macro */
 
-#define ACPI_TRACE_ENTRY(name, function, cast, param) \
+#define ACPI_TRACE_ENTRY(name, function, type, param) \
 	ACPI_FUNCTION_NAME (name) \
-	function (ACPI_DEBUG_PARAMETERS, cast (param))
+	function (ACPI_DEBUG_PARAMETERS, (type) (param))
 
 /* The actual entry trace macros */
 
@@ -335,13 +335,13 @@
 	acpi_ut_trace (ACPI_DEBUG_PARAMETERS)
 
 #define ACPI_FUNCTION_TRACE_PTR(name, pointer) \
-	ACPI_TRACE_ENTRY (name, acpi_ut_trace_ptr, (void *), pointer)
+	ACPI_TRACE_ENTRY (name, acpi_ut_trace_ptr, void *, pointer)
 
 #define ACPI_FUNCTION_TRACE_U32(name, value) \
-	ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, (u32), value)
+	ACPI_TRACE_ENTRY (name, acpi_ut_trace_u32, u32, value)
 
 #define ACPI_FUNCTION_TRACE_STR(name, string) \
-	ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, (char *), string)
+	ACPI_TRACE_ENTRY (name, acpi_ut_trace_str, char *, string)
 
 #define ACPI_FUNCTION_ENTRY() \
 	acpi_ut_track_stack_ptr()
@@ -355,16 +355,37 @@
  *
  * One of the FUNCTION_TRACE macros above must be used in conjunction
  * with these macros so that "_AcpiFunctionName" is defined.
+ *
+ * There are two versions of most of the return macros. The default version is
+ * safer, since it avoids side-effects by guaranteeing that the argument will
+ * not be evaluated twice.
+ *
+ * A less-safe version of the macros is provided for optional use if the
+ * compiler uses excessive CPU stack (for example, this may happen in the
+ * debug case if code optimization is disabled.)
  */
 
 /* Exit trace helper macro */
 
-#define ACPI_TRACE_EXIT(function, cast, param) \
+#ifndef ACPI_SIMPLE_RETURN_MACROS
+
+#define ACPI_TRACE_EXIT(function, type, param) \
 	ACPI_DO_WHILE0 ({ \
-		function (ACPI_DEBUG_PARAMETERS, cast (param)); \
-		return ((param)); \
+		register type _param = (type) (param); \
+		function (ACPI_DEBUG_PARAMETERS, _param); \
+		return (_param); \
 	})
 
+#else				/* Use original less-safe macros */
+
+#define ACPI_TRACE_EXIT(function, type, param) \
+	ACPI_DO_WHILE0 ({ \
+		function (ACPI_DEBUG_PARAMETERS, (type) (param)); \
+		return (param); \
+	})
+
+#endif				/* ACPI_SIMPLE_RETURN_MACROS */
+
 /* The actual exit macros */
 
 #define return_VOID \
@@ -374,13 +395,19 @@
 	})
 
 #define return_ACPI_STATUS(status) \
-	ACPI_TRACE_EXIT (acpi_ut_status_exit, (acpi_status), status)
+	ACPI_TRACE_EXIT (acpi_ut_status_exit, acpi_status, status)
 
 #define return_PTR(pointer) \
-	ACPI_TRACE_EXIT (acpi_ut_ptr_exit, (u8 *), pointer)
+	ACPI_TRACE_EXIT (acpi_ut_ptr_exit, void *, pointer)
 
 #define return_VALUE(value) \
-	ACPI_TRACE_EXIT (acpi_ut_value_exit, (u64), value)
+	ACPI_TRACE_EXIT (acpi_ut_value_exit, u64, value)
+
+#define return_UINT32(value) \
+	ACPI_TRACE_EXIT (acpi_ut_value_exit, u32, value)
+
+#define return_UINT8(value) \
+	ACPI_TRACE_EXIT (acpi_ut_value_exit, u8, value)
 
 /* Conditional execution */
 
@@ -428,8 +455,10 @@
 
 #define return_VOID                     return
 #define return_ACPI_STATUS(s)           return(s)
-#define return_VALUE(s)                 return(s)
 #define return_PTR(s)                   return(s)
+#define return_VALUE(s)                 return(s)
+#define return_UINT8(s)                 return(s)
+#define return_UINT32(s)                return(s)
 
 #endif				/* ACPI_DEBUG_OUTPUT */
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 22ba56e..98db31d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -88,11 +88,30 @@
  * -----------------
  */
 
+enum acpi_hotplug_mode {
+	AHM_GENERIC = 0,
+	AHM_CONTAINER,
+	AHM_COUNT
+};
+
+struct acpi_hotplug_profile {
+	struct kobject kobj;
+	bool enabled:1;
+	enum acpi_hotplug_mode mode;
+};
+
+static inline struct acpi_hotplug_profile *to_acpi_hotplug_profile(
+						struct kobject *kobj)
+{
+	return container_of(kobj, struct acpi_hotplug_profile, kobj);
+}
+
 struct acpi_scan_handler {
 	const struct acpi_device_id *ids;
 	struct list_head list_node;
 	int (*attach)(struct acpi_device *dev, const struct acpi_device_id *id);
 	void (*detach)(struct acpi_device *dev);
+	struct acpi_hotplug_profile hotplug;
 };
 
 /*
@@ -142,7 +161,6 @@
 
 struct acpi_device_flags {
 	u32 dynamic_status:1;
-	u32 bus_address:1;
 	u32 removable:1;
 	u32 ejectable:1;
 	u32 suprise_removal_ok:1;
@@ -150,7 +168,7 @@
 	u32 performance_manageable:1;
 	u32 eject_pending:1;
 	u32 match_driver:1;
-	u32 reserved:23;
+	u32 reserved:24;
 };
 
 /* File System */
@@ -173,10 +191,17 @@
 	char *id;
 };
 
+struct acpi_pnp_type {
+	u32 hardware_id:1;
+	u32 bus_address:1;
+	u32 reserved:30;
+};
+
 struct acpi_device_pnp {
-	acpi_bus_id bus_id;	/* Object name */
+	acpi_bus_id bus_id;		/* Object name */
+	struct acpi_pnp_type type;	/* ID type */
 	acpi_bus_address bus_address;	/* _ADR */
-	char *unique_id;	/* _UID */
+	char *unique_id;		/* _UID */
 	struct list_head ids;		/* _HID and _CIDs */
 	acpi_device_name device_name;	/* Driver-determined */
 	acpi_device_class device_class;	/*        "          */
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 627749a..e6168a2 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -95,7 +95,6 @@
 struct pci_bus;
 
 struct pci_dev *acpi_get_pci_dev(acpi_handle);
-int acpi_pci_bind_root(struct acpi_device *device);
 
 /* Arch-defined function to add a bus to the system */
 
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 03322dd..454881e 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20130117
+#define ACPI_CA_VERSION                 0x20130328
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 77dc7a4..ffaac0e 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -72,11 +72,13 @@
 #define ACPI_SIG_IVRS           "IVRS"	/* I/O Virtualization Reporting Structure */
 #define ACPI_SIG_MCFG           "MCFG"	/* PCI Memory Mapped Configuration table */
 #define ACPI_SIG_MCHI           "MCHI"	/* Management Controller Host Interface table */
+#define ACPI_SIG_MTMR           "MTMR"	/* MID Timer table */
 #define ACPI_SIG_SLIC           "SLIC"	/* Software Licensing Description Table */
 #define ACPI_SIG_SPCR           "SPCR"	/* Serial Port Console Redirection table */
 #define ACPI_SIG_SPMI           "SPMI"	/* Server Platform Management Interface table */
 #define ACPI_SIG_TCPA           "TCPA"	/* Trusted Computing Platform Alliance table */
 #define ACPI_SIG_UEFI           "UEFI"	/* Uefi Boot Optimization Table */
+#define ACPI_SIG_VRTC           "VRTC"	/* Virtual Real Time Clock Table */
 #define ACPI_SIG_WAET           "WAET"	/* Windows ACPI Emulated devices Table */
 #define ACPI_SIG_WDAT           "WDAT"	/* Watchdog Action Table */
 #define ACPI_SIG_WDDT           "WDDT"	/* Watchdog Timer Description Table */
@@ -852,6 +854,29 @@
 
 /*******************************************************************************
  *
+ * MTMR - MID Timer Table
+ *        Version 1
+ *
+ * Conforms to "Simple Firmware Interface Specification",
+ * Draft 0.8.2, Oct 19, 2010
+ * NOTE: The ACPI MTMR is equivalent to the SFI MTMR table.
+ *
+ ******************************************************************************/
+
+struct acpi_table_mtmr {
+	struct acpi_table_header header;	/* Common ACPI table header */
+};
+
+/* MTMR entry */
+
+struct acpi_mtmr_entry {
+	struct acpi_generic_address physical_address;
+	u32 frequency;
+	u32 irq;
+};
+
+/*******************************************************************************
+ *
  * SLIC - Software Licensing Description Table
  *        Version 1
  *
@@ -1025,6 +1050,28 @@
 
 /*******************************************************************************
  *
+ * VRTC - Virtual Real Time Clock Table
+ *        Version 1
+ *
+ * Conforms to "Simple Firmware Interface Specification",
+ * Draft 0.8.2, Oct 19, 2010
+ * NOTE: The ACPI VRTC is equivalent to the SFI MRTC table.
+ *
+ ******************************************************************************/
+
+struct acpi_table_vrtc {
+	struct acpi_table_header header;	/* Common ACPI table header */
+};
+
+/* VRTC entry */
+
+struct acpi_vrtc_entry {
+	struct acpi_generic_address physical_address;
+	u32 irq;
+};
+
+/*******************************************************************************
+ *
  * WAET - Windows ACPI Emulated devices Table
  *        Version 1
  *
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 332b17e..e2c0931 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -174,7 +174,7 @@
 
 enum acpi_fpdt_type {
 	ACPI_FPDT_TYPE_BOOT = 0,
-	ACPI_FPDT_TYPE_S3PERF = 1,
+	ACPI_FPDT_TYPE_S3PERF = 1
 };
 
 /*
@@ -223,7 +223,7 @@
 
 enum acpi_s3pt_type {
 	ACPI_S3PT_TYPE_RESUME = 0,
-	ACPI_S3PT_TYPE_SUSPEND = 1,
+	ACPI_S3PT_TYPE_SUSPEND = 1
 };
 
 struct acpi_s3pt_resume {
@@ -505,26 +505,59 @@
 	u32 signature;
 	u16 command;
 	u16 status;
-	u64 requested_address;
-	u64 requested_length;
-	u64 actual_address;
-	u64 actual_length;
+	u16 version;
+	u8 capabilities[16];
+	u8 set_capabilities[16];
+	u16 num_parameter_blocks;
+	u32 set_capabilities_status;
+};
+
+/* RASF Parameter Block Structure Header */
+
+struct acpi_rasf_parameter_block {
+	u16 type;
+	u16 version;
+	u16 length;
+};
+
+/* RASF Parameter Block Structure for PATROL_SCRUB */
+
+struct acpi_rasf_patrol_scrub_parameter {
+	struct acpi_rasf_parameter_block header;
+	u16 patrol_scrub_command;
+	u64 requested_address_range[2];
+	u64 actual_address_range[2];
 	u16 flags;
-	u8 speed;
+	u8 requested_speed;
 };
 
 /* Masks for Flags and Speed fields above */
 
 #define ACPI_RASF_SCRUBBER_RUNNING      1
 #define ACPI_RASF_SPEED                 (7<<1)
+#define ACPI_RASF_SPEED_SLOW            (0<<1)
+#define ACPI_RASF_SPEED_MEDIUM          (4<<1)
+#define ACPI_RASF_SPEED_FAST            (7<<1)
 
 /* Channel Commands */
 
 enum acpi_rasf_commands {
-	ACPI_RASF_GET_RAS_CAPABILITIES = 1,
-	ACPI_RASF_GET_PATROL_PARAMETERS = 2,
-	ACPI_RASF_START_PATROL_SCRUBBER = 3,
-	ACPI_RASF_STOP_PATROL_SCRUBBER = 4
+	ACPI_RASF_EXECUTE_RASF_COMMAND = 1
+};
+
+/* Platform RAS Capabilities */
+
+enum acpi_rasf_capabiliities {
+	ACPI_HW_PATROL_SCRUB_SUPPORTED = 0,
+	ACPI_SW_PATROL_SCRUB_EXPOSED = 1
+};
+
+/* Patrol Scrub Commands */
+
+enum acpi_rasf_patrol_scrub_commands {
+	ACPI_RASF_GET_PATROL_PARAMETERS = 1,
+	ACPI_RASF_START_PATROL_SCRUBBER = 2,
+	ACPI_RASF_STOP_PATROL_SCRUBBER = 3
 };
 
 /* Channel Command flags */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 845e75f..a64adcc 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -650,13 +650,14 @@
  * The encoding of acpi_event_status is illustrated below.
  * Note that a set bit (1) indicates the property is TRUE
  * (e.g. if bit 0 is set then the event is enabled).
- * +-------------+-+-+-+
- * |   Bits 31:3 |2|1|0|
- * +-------------+-+-+-+
- *          |     | | |
- *          |     | | +- Enabled?
- *          |     | +--- Enabled for wake?
- *          |     +----- Set?
+ * +-------------+-+-+-+-+
+ * |   Bits 31:4 |3|2|1|0|
+ * +-------------+-+-+-+-+
+ *          |     | | | |
+ *          |     | | | +- Enabled?
+ *          |     | | +--- Enabled for wake?
+ *          |     | +----- Set?
+ *          |     +------- Has a handler?
  *          +----------- <Reserved>
  */
 typedef u32 acpi_event_status;
@@ -1128,7 +1129,6 @@
 	u16 object_size;
 	u16 max_depth;
 	u16 current_depth;
-	u16 link_offset;
 
 #ifdef ACPI_DBG_TRACK_ALLOCATIONS
 
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 03053ac..17b5b59 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -195,7 +195,7 @@
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
 
 extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
-extern long acpi_is_video_device(struct acpi_device *device);
+extern long acpi_is_video_device(acpi_handle handle);
 extern void acpi_video_dmi_promote_vendor(void);
 extern void acpi_video_dmi_demote_vendor(void);
 extern int acpi_video_backlight_support(void);
@@ -208,7 +208,7 @@
 	return 0;
 }
 
-static inline long acpi_is_video_device(struct acpi_device *device)
+static inline long acpi_is_video_device(acpi_handle handle)
 {
 	return 0;
 }
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 464e229..963d714 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -8,6 +8,20 @@
 #ifndef _LINUX_CLOCKCHIPS_H
 #define _LINUX_CLOCKCHIPS_H
 
+/* Clock event notification values */
+enum clock_event_nofitiers {
+	CLOCK_EVT_NOTIFY_ADD,
+	CLOCK_EVT_NOTIFY_BROADCAST_ON,
+	CLOCK_EVT_NOTIFY_BROADCAST_OFF,
+	CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+	CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+	CLOCK_EVT_NOTIFY_SUSPEND,
+	CLOCK_EVT_NOTIFY_RESUME,
+	CLOCK_EVT_NOTIFY_CPU_DYING,
+	CLOCK_EVT_NOTIFY_CPU_DEAD,
+};
+
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
 
 #include <linux/clocksource.h>
@@ -26,20 +40,6 @@
 	CLOCK_EVT_MODE_RESUME,
 };
 
-/* Clock event notification values */
-enum clock_event_nofitiers {
-	CLOCK_EVT_NOTIFY_ADD,
-	CLOCK_EVT_NOTIFY_BROADCAST_ON,
-	CLOCK_EVT_NOTIFY_BROADCAST_OFF,
-	CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-	CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
-	CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
-	CLOCK_EVT_NOTIFY_SUSPEND,
-	CLOCK_EVT_NOTIFY_RESUME,
-	CLOCK_EVT_NOTIFY_CPU_DYING,
-	CLOCK_EVT_NOTIFY_CPU_DEAD,
-};
-
 /*
  * Clock event features
  */
@@ -184,7 +184,7 @@
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void clockevents_notify(unsigned long reason, void *arg);
 #else
-# define clockevents_notify(reason, arg) do { } while (0)
+static inline void clockevents_notify(unsigned long reason, void *arg) {}
 #endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
@@ -192,7 +192,7 @@
 static inline void clockevents_suspend(void) {}
 static inline void clockevents_resume(void) {}
 
-#define clockevents_notify(reason, arg) do { } while (0)
+static inline void clockevents_notify(unsigned long reason, void *arg) {}
 static inline int tick_check_broadcast_expired(void) { return 0; }
 
 #endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index a22944c..037d36a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -106,6 +106,7 @@
 					 * governors are used */
 	unsigned int		policy; /* see above */
 	struct cpufreq_governor	*governor; /* see below */
+	void			*governor_data;
 
 	struct work_struct	update; /* if update_policy() needs to be
 					 * called, but you're in IRQ context */
@@ -178,9 +179,11 @@
  *                          CPUFREQ GOVERNORS                        *
  *********************************************************************/
 
-#define CPUFREQ_GOV_START  1
-#define CPUFREQ_GOV_STOP   2
-#define CPUFREQ_GOV_LIMITS 3
+#define CPUFREQ_GOV_START	1
+#define CPUFREQ_GOV_STOP	2
+#define CPUFREQ_GOV_LIMITS	3
+#define CPUFREQ_GOV_POLICY_INIT	4
+#define CPUFREQ_GOV_POLICY_EXIT	5
 
 struct cpufreq_governor {
 	char	name[CPUFREQ_NAME_LEN];
@@ -229,6 +232,13 @@
 	struct module           *owner;
 	char			name[CPUFREQ_NAME_LEN];
 	u8			flags;
+	/*
+	 * This should be set by platforms having multiple clock-domains, i.e.
+	 * supporting multiple policies.  With this, the governor's sysfs
+	 * directories are created per policy under cpu/cpu<num>/cpufreq/, so
+	 * each cluster can use the same governor with different tunables.
+	 */
+	bool			have_governor_per_policy;
 
 	/* needed by all drivers */
 	int	(*init)		(struct cpufreq_policy *policy);
@@ -268,8 +278,8 @@
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
 
 
-void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state);
-
+void cpufreq_notify_transition(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, unsigned int state);
 
 static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max)
 {
@@ -329,6 +339,7 @@
  *********************************************************************/
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
+bool have_governor_per_policy(void);
 
 #ifdef CONFIG_CPU_FREQ
 /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */
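
A minimal sketch of how a multi-cluster cpufreq driver might opt into the
per-policy governor directories described above; the example_* callbacks and
the driver name are hypothetical, not taken from any in-tree driver:

	static struct cpufreq_driver example_cpufreq_driver = {
		.name		= "example-cpufreq",	/* hypothetical */
		.init		= example_cpufreq_init,
		.verify		= example_cpufreq_verify,
		.target		= example_cpufreq_target,
		.get		= example_cpufreq_get,
		/* one clock domain per cluster: give each policy its own tunables */
		.have_governor_per_policy = true,
	};

Governor code can then consult the new have_governor_per_policy() helper to
decide whether its sysfs attributes belong under cpu/cpu<num>/cpufreq/ or in
the global cpufreq directory.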
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 480c14d..3c86faa 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -57,6 +57,7 @@
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
 #define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
@@ -104,8 +105,8 @@
 	struct module 		*owner;
 	int                     refcnt;
 
-	/* set to 1 to use the core cpuidle time keeping (for all states). */
-	unsigned int		en_core_tk_irqen:1;
+	/* used by the cpuidle framework to set up the broadcast timer */
+	unsigned int		bctimer:1;
 	/* states array must be ordered in decreasing power consumption */
 	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
 	int			state_count;
@@ -122,17 +123,15 @@
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
 extern int cpuidle_register_device(struct cpuidle_device *dev);
 extern void cpuidle_unregister_device(struct cpuidle_device *dev);
-
+extern int cpuidle_register(struct cpuidle_driver *drv,
+			    const struct cpumask *const coupled_cpus);
+extern void cpuidle_unregister(struct cpuidle_driver *drv);
 extern void cpuidle_pause_and_lock(void);
 extern void cpuidle_resume_and_unlock(void);
 extern void cpuidle_pause(void);
 extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
-extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv, int index,
-				int (*enter)(struct cpuidle_device *dev,
-					struct cpuidle_driver *drv, int index));
 extern int cpuidle_play_dead(void);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
@@ -151,7 +150,10 @@
 static inline int cpuidle_register_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
-
+static inline int cpuidle_register(struct cpuidle_driver *drv,
+				   const struct cpumask *const coupled_cpus)
+{return -ENODEV; }
+static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
 static inline void cpuidle_pause_and_lock(void) { }
 static inline void cpuidle_resume_and_unlock(void) { }
 static inline void cpuidle_pause(void) { }
@@ -159,11 +161,6 @@
 static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
-static inline int cpuidle_wrap_enter(struct cpuidle_device *dev,
-				struct cpuidle_driver *drv, int index,
-				int (*enter)(struct cpuidle_device *dev,
-					struct cpuidle_driver *drv, int index))
-{ return -ENODEV; }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
 #endif
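
A brief sketch of the new one-call registration path added above; the driver
below is illustrative (a single hypothetical WFI-style state), not an in-tree
user:

	static struct cpuidle_driver example_idle_driver = {
		.name		= "example_idle",	/* hypothetical */
		.owner		= THIS_MODULE,
		.states[0]	= {
			.name			= "WFI",
			.desc			= "wait for interrupt",
			.exit_latency		= 1,
			.target_residency	= 1,
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.enter			= example_enter_wfi,	/* hypothetical */
		},
		.state_count	= 1,
	};

	/* registers the driver and a cpuidle device per CPU; NULL = no coupled states */
	int ret = cpuidle_register(&example_idle_driver, NULL);
	if (ret)
		return ret;
	/* on teardown: */
	cpuidle_unregister(&example_idle_driver);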
 
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
index 9ead60b..3301b20 100644
--- a/include/linux/mfd/abx500.h
+++ b/include/linux/mfd/abx500.h
@@ -89,6 +89,11 @@
  *				points.
  * @maint_thres			This is the threshold where we stop reporting
  *				battery full while in maintenance, in per cent
+ * @pcut_enable:			Enable power cut feature in ab8505
+ * @pcut_max_time:		Max time threshold
+ * @pcut_flag_time:		Flagtime threshold
+ * @pcut_max_restart:		Max number of restarts
+ * @pcut_debounce_time:		Sets battery debounce time
  */
 struct abx500_fg_parameters {
 	int recovery_sleep_timer;
@@ -106,6 +111,11 @@
 	int battok_raising_th_sel1;
 	int user_cap_limit;
 	int maint_thres;
+	bool pcut_enable;
+	u8 pcut_max_time;
+	u8 pcut_flag_time;
+	u8 pcut_max_restart;
+	u8 pcut_debounce_time;
 };
 
 /**
@@ -173,11 +183,11 @@
 	int low_high_vol_lvl;
 	int battery_resistance;
 	int n_temp_tbl_elements;
-	struct abx500_res_to_temp *r_to_t_tbl;
+	const struct abx500_res_to_temp *r_to_t_tbl;
 	int n_v_cap_tbl_elements;
-	struct abx500_v_to_cap *v_to_cap_tbl;
+	const struct abx500_v_to_cap *v_to_cap_tbl;
 	int n_batres_tbl_elements;
-	struct batres_vs_temp *batres_tbl;
+	const struct batres_vs_temp *batres_tbl;
 };
 
 /**
@@ -236,7 +246,11 @@
  * @interval_not_charging charge alg cycle period time when not charging (sec)
  * @temp_hysteresis	temperature hysteresis
  * @gnd_lift_resistance	Battery ground to phone ground resistance (mOhm)
- * @maxi:		maximization parameters
+ * @n_chg_out_curr		number of elements in array chg_output_curr
+ * @n_chg_in_curr		number of elements in array chg_input_curr
+ * @chg_output_curr	charger output current level map
+ * @chg_input_curr		charger input current level map
+ * @maxi		maximization parameters
  * @cap_levels		capacity in percent for the different capacity levels
  * @bat_type		table of supported battery types
  * @chg_params		charger parameters
@@ -257,6 +271,7 @@
 	bool autopower_cfg;
 	bool ac_enabled;
 	bool usb_enabled;
+	bool usb_power_path;
 	bool no_maintenance;
 	bool capacity_scaling;
 	bool chg_unknown_bat;
@@ -270,6 +285,10 @@
 	int interval_not_charging;
 	int temp_hysteresis;
 	int gnd_lift_resistance;
+	int n_chg_out_curr;
+	int n_chg_in_curr;
+	int *chg_output_curr;
+	int *chg_input_curr;
 	const struct abx500_maxim_parameters *maxi;
 	const struct abx500_bm_capacity_levels *cap_levels;
 	struct abx500_battery_type *bat_type;
diff --git a/include/linux/mfd/abx500/ab8500-bm.h b/include/linux/mfd/abx500/ab8500-bm.h
index 8d35bfe..cc892a8 100644
--- a/include/linux/mfd/abx500/ab8500-bm.h
+++ b/include/linux/mfd/abx500/ab8500-bm.h
@@ -23,6 +23,7 @@
  * Bank : 0x5
  */
 #define AB8500_USB_LINE_STAT_REG	0x80
+#define AB8500_USB_LINE_CTRL2_REG	0x82
 #define AB8500_USB_LINK1_STAT_REG	0x94
 
 /*
@@ -33,7 +34,7 @@
 #define AB8500_CH_STATUS2_REG		0x01
 #define AB8500_CH_USBCH_STAT1_REG	0x02
 #define AB8500_CH_USBCH_STAT2_REG	0x03
-#define AB8500_CH_FSM_STAT_REG		0x04
+#define AB8540_CH_USBCH_STAT3_REG	0x04
 #define AB8500_CH_STAT_REG		0x05
 
 /*
@@ -69,6 +70,8 @@
 #define AB8500_USBCH_CTRL1_REG		0xC0
 #define AB8500_USBCH_CTRL2_REG		0xC1
 #define AB8500_USBCH_IPT_CRNTLVL_REG	0xC2
+#define AB8540_USB_PP_MODE_REG		0xC5
+#define AB8540_USB_PP_CHR_REG		0xC6
 
 /*
  * Gas Gauge register offsets
@@ -105,6 +108,7 @@
 #define AB8500_RTC_BACKUP_CHG_REG	0x0C
 #define AB8500_RTC_CC_CONF_REG		0x01
 #define AB8500_RTC_CTRL_REG		0x0B
+#define AB8500_RTC_CTRL1_REG		0x11
 
 /*
  * OTP register offsets
@@ -154,6 +158,7 @@
 #define CH_OP_CUR_LVL_1P4		0x0D
 #define CH_OP_CUR_LVL_1P5		0x0E
 #define CH_OP_CUR_LVL_1P6		0x0F
+#define CH_OP_CUR_LVL_2P		0x3F
 
 /* BTEMP High thermal limits */
 #define BTEMP_HIGH_TH_57_0		0x00
@@ -179,10 +184,25 @@
 #define BUP_ICH_SEL_300UA		0x08
 #define BUP_ICH_SEL_700UA		0x0C
 
-#define BUP_VCH_SEL_2P5V		0x00
-#define BUP_VCH_SEL_2P6V		0x01
-#define BUP_VCH_SEL_2P8V		0x02
-#define BUP_VCH_SEL_3P1V		0x03
+enum bup_vch_sel {
+	BUP_VCH_SEL_2P5V,
+	BUP_VCH_SEL_2P6V,
+	BUP_VCH_SEL_2P8V,
+	BUP_VCH_SEL_3P1V,
+	/*
+	 * Note that the following 5 values 2.7V, 2.9V, 3.0V, 3.2V and 3.3V
+	 * are only available on ab8540; they cannot be selected on
+	 * ab8500/ab8505/ab9540.
+	 */
+	BUP_VCH_SEL_2P7V,
+	BUP_VCH_SEL_2P9V,
+	BUP_VCH_SEL_3P0V,
+	BUP_VCH_SEL_3P2V,
+	BUP_VCH_SEL_3P3V,
+};
+
+#define BUP_VCH_RANGE		0x02
+#define VBUP33_VRTCN		0x01
 
 /* Battery OVV constants */
 #define BATT_OVV_ENA			0x02
@@ -228,6 +248,8 @@
 #define BAT_CTRL_20U_ENA		0x02
 #define BAT_CTRL_18U_ENA		0x01
 #define BAT_CTRL_16U_ENA		0x02
+#define BAT_CTRL_60U_ENA		0x01
+#define BAT_CTRL_120U_ENA		0x02
 #define BAT_CTRL_CMP_ENA		0x04
 #define FORCE_BAT_CTRL_CMP_HIGH		0x08
 #define BAT_CTRL_PULL_UP_ENA		0x10
@@ -235,6 +257,24 @@
 /* Battery type */
 #define BATTERY_UNKNOWN			00
 
+/* Registers for pcut feature in ab8505 and ab9540 */
+#define AB8505_RTC_PCUT_CTL_STATUS_REG	0x12
+#define AB8505_RTC_PCUT_TIME_REG	0x13
+#define AB8505_RTC_PCUT_MAX_TIME_REG	0x14
+#define AB8505_RTC_PCUT_FLAG_TIME_REG	0x15
+#define AB8505_RTC_PCUT_RESTART_REG	0x16
+#define AB8505_RTC_PCUT_DEBOUNCE_REG	0x17
+
+/* USB Power Path constants for ab8540 */
+#define BUS_VSYS_VOL_SELECT_MASK		0x06
+#define BUS_VSYS_VOL_SELECT_3P6V		0x00
+#define BUS_VSYS_VOL_SELECT_3P325V		0x02
+#define BUS_VSYS_VOL_SELECT_3P9V		0x04
+#define BUS_VSYS_VOL_SELECT_4P3V		0x06
+#define BUS_POWER_PATH_MODE_ENA			0x01
+#define BUS_PP_PRECHG_CURRENT_MASK		0x0E
+#define BUS_POWER_PATH_PRECHG_ENA		0x01
+
 /**
  * struct res_to_temp - defines one point in a temp to res curve. To
  * be used in battery packs that combines the identification resistor with a
@@ -283,6 +323,11 @@
  *				points.
  * @maint_thres			This is the threshold where we stop reporting
  *				battery full while in maintenance, in per cent
+ * @pcut_enable:			Enable power cut feature in ab8505
+ * @pcut_max_time:		Max time threshold
+ * @pcut_flag_time:		Flagtime threshold
+ * @pcut_max_restart:		Max number of restarts
+ * @pcut_debunce_time:	Sets battery debounce time
  */
 struct ab8500_fg_parameters {
 	int recovery_sleep_timer;
@@ -299,6 +344,11 @@
 	int battok_raising_th_sel1;
 	int user_cap_limit;
 	int maint_thres;
+	bool pcut_enable;
+	u8 pcut_max_time;
+	u8 pcut_flag_time;
+	u8 pcut_max_restart;
+	u8 pcut_debunce_time;
 };
 
 /**
@@ -415,6 +465,7 @@
 void ab8500_charger_usb_state_changed(u8 bm_usb_state, u16 mA);
 struct ab8500_btemp *ab8500_btemp_get(void);
 int ab8500_btemp_get_batctrl_temp(struct ab8500_btemp *btemp);
+int ab8500_btemp_get_temp(struct ab8500_btemp *btemp);
 struct ab8500_fg *ab8500_fg_get(void);
 int ab8500_fg_inst_curr_blocking(struct ab8500_fg *dev);
 int ab8500_fg_inst_curr_start(struct ab8500_fg *di);
diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h
index 2529667..49ded00 100644
--- a/include/linux/mfd/abx500/ab8500-gpadc.h
+++ b/include/linux/mfd/abx500/ab8500-gpadc.h
@@ -4,32 +4,72 @@
  *
  * Author: Arun R Murthy <arun.murthy@stericsson.com>
  * Author: Daniel Willerud <daniel.willerud@stericsson.com>
+ * Author: M'boumba Cedric Madianga <cedric.madianga@stericsson.com>
  */
 
 #ifndef	_AB8500_GPADC_H
 #define _AB8500_GPADC_H
 
-/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2) */
-#define BAT_CTRL	0x01
-#define BTEMP_BALL	0x02
-#define MAIN_CHARGER_V	0x03
-#define ACC_DETECT1	0x04
-#define ACC_DETECT2	0x05
-#define ADC_AUX1	0x06
-#define ADC_AUX2	0x07
-#define MAIN_BAT_V	0x08
-#define VBUS_V		0x09
-#define MAIN_CHARGER_C	0x0A
-#define USB_CHARGER_C	0x0B
-#define BK_BAT_V	0x0C
-#define DIE_TEMP	0x0D
+/* GPADC source: From datasheet (ADCSwSel[4:0] in GPADCCtrl2
+ * and ADCHwSel[4:0] in GPADCCtrl3) */
+#define BAT_CTRL		0x01
+#define BTEMP_BALL		0x02
+#define MAIN_CHARGER_V		0x03
+#define ACC_DETECT1		0x04
+#define ACC_DETECT2		0x05
+#define ADC_AUX1		0x06
+#define ADC_AUX2		0x07
+#define MAIN_BAT_V		0x08
+#define VBUS_V			0x09
+#define MAIN_CHARGER_C		0x0A
+#define USB_CHARGER_C		0x0B
+#define BK_BAT_V		0x0C
+#define DIE_TEMP		0x0D
+#define USB_ID			0x0E
+#define XTAL_TEMP		0x12
+#define VBAT_TRUE_MEAS		0x13
+#define BAT_CTRL_AND_IBAT	0x1C
+#define VBAT_MEAS_AND_IBAT	0x1D
+#define VBAT_TRUE_MEAS_AND_IBAT	0x1E
+#define BAT_TEMP_AND_IBAT	0x1F
+
+/* Virtual channel used only for ibat conversion to ampere.
+ * Battery current conversion (ibat) cannot be requested as a single
+ * conversion but is always done in combination with other input requests.
+ */
+#define IBAT_VIRTUAL_CHANNEL		0xFF
+
+#define SAMPLE_1        1
+#define SAMPLE_4        4
+#define SAMPLE_8        8
+#define SAMPLE_16       16
+#define RISING_EDGE     0
+#define FALLING_EDGE    1
+
+/* Arbitrary ADC conversion type constants */
+#define ADC_SW				0
+#define ADC_HW				1
 
 struct ab8500_gpadc;
 
 struct ab8500_gpadc *ab8500_gpadc_get(char *name);
-int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel);
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel);
+int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
+		u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
+static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
+{
+	return ab8500_gpadc_sw_hw_convert(gpadc, channel,
+			SAMPLE_16, 0, 0, ADC_SW);
+}
+
+int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
+		u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
+int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
+		u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
+		int *ibat);
 int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
-    u8 channel, int ad_value);
+		u8 channel, int ad_value);
+void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
+			u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
+			u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h);
 
 #endif /* _AB8500_GPADC_H */
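
Existing callers of ab8500_gpadc_convert() keep their two-argument form
through the static inline wrapper above; a short illustrative sketch (the
device name string is assumed):

	struct ab8500_gpadc *gpadc = ab8500_gpadc_get("ab8500-gpadc.0");	/* name assumed */
	int vbat;

	/* software-triggered conversion, averaged over 16 samples */
	vbat = ab8500_gpadc_convert(gpadc, MAIN_BAT_V);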
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index ebf12e7..990bc93 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -12,6 +12,7 @@
 
 int ab8500_sysctrl_read(u16 reg, u8 *value);
 int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value);
+void ab8500_restart(char mode, const char *cmd);
 
 #else
 
@@ -40,6 +41,7 @@
 /* Configuration data for SysClkReq1RfClkBuf - SysClkReq8RfClkBuf */
 struct ab8500_sysctrl_platform_data {
 	u8 initial_req_buf_config[8];
+	u16 (*reboot_reason_code)(const char *cmd);
 };
 
 /* Registers */
@@ -299,4 +301,8 @@
 #define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_MASK 0xFF
 #define AB9540_SYSCLK12BUF4VALID_SYSCLK12BUF4VALID_SHIFT 0
 
+#define AB8500_ENABLE_WD 0x1
+#define AB8500_KICK_WD 0x2
+#define AB8500_WD_RESTART_ON_EXPIRE 0x10
+
 #endif /* __AB8500_SYSCTRL_H */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 84f4494..fb1bf7d 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -362,6 +362,7 @@
 	u8 *oldmask;
 	int mask_size;
 	const int *irq_reg_offset;
+	int it_latchhier_num;
 };
 
 struct ab8500_regulator_platform_data;
@@ -505,6 +506,8 @@
 	return (is_ab9540(ab) && (ab->chip_id < AB8500_CUT2P0));
 }
 
+void ab8500_override_turn_on_stat(u8 mask, u8 set);
+
 #ifdef CONFIG_AB8500_DEBUG
 void ab8500_dump_all_banks(struct device *dev);
 void ab8500_debug_register_interrupt(int line);
diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
index d43ac0f..234c991 100644
--- a/include/linux/mfd/abx500/ux500_chargalg.h
+++ b/include/linux/mfd/abx500/ux500_chargalg.h
@@ -17,8 +17,11 @@
 
 struct ux500_charger_ops {
 	int (*enable) (struct ux500_charger *, int, int, int);
+	int (*check_enable) (struct ux500_charger *, int, int);
 	int (*kick_wd) (struct ux500_charger *);
 	int (*update_curr) (struct ux500_charger *, int);
+	int (*pp_enable) (struct ux500_charger *, bool);
+	int (*pre_chg_enable) (struct ux500_charger *, bool);
 };
 
 /**
@@ -29,6 +32,7 @@
  * @max_out_curr	maximum output charger current in mA
  * @enabled		indicates if this charger is used or not
  * @external		external charger unit (pm2xxx)
+ * @power_path		USB power path support
  */
 struct ux500_charger {
 	struct power_supply psy;
@@ -38,6 +42,9 @@
 	int wdt_refresh;
 	bool enabled;
 	bool external;
+	bool power_path;
 };
 
+extern struct blocking_notifier_head charger_notifier_list;
+
 #endif
diff --git a/include/linux/mfd/tps65090.h b/include/linux/mfd/tps65090.h
index 6694cf4..998628a 100644
--- a/include/linux/mfd/tps65090.h
+++ b/include/linux/mfd/tps65090.h
@@ -86,6 +86,11 @@
 
 struct tps65090_platform_data {
 	int irq_base;
+
+	char **supplied_to;
+	size_t num_supplicants;
+	int enable_low_current_chrg;
+
 	struct tps65090_regulator_plat_data *reg_pdata[TPS65090_REGULATOR_MAX];
 };
 
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 1cc2568..fc01d5c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -59,11 +59,18 @@
 	pid_t l_pid;
 };
 
+#define NFS_IO_INPROGRESS 0
+struct nfs_io_counter {
+	unsigned long flags;
+	atomic_t io_count;
+};
+
 struct nfs_lock_context {
 	atomic_t count;
 	struct list_head list;
 	struct nfs_open_context *open_context;
 	struct nfs_lockowner lockowner;
+	struct nfs_io_counter io_count;
 };
 
 struct nfs4_state;
@@ -77,6 +84,7 @@
 	unsigned long flags;
 #define NFS_CONTEXT_ERROR_WRITE		(0)
 #define NFS_CONTEXT_RESEND_WRITES	(1)
+#define NFS_CONTEXT_BAD			(2)
 	int error;
 
 	struct list_head list;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 6c6ed15..3b7fa2a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -40,6 +40,7 @@
 #define NFS_CS_NORESVPORT	0		/* - use ephemeral src port */
 #define NFS_CS_DISCRTRY		1		/* - disconnect on RPC retry */
 #define NFS_CS_MIGRATION	2		/* - transparent state migr */
+#define NFS_CS_INFINITE_SLOTS	3		/* - don't limit TCP slots */
 	struct sockaddr_storage	cl_addr;	/* server identifier */
 	size_t			cl_addrlen;
 	char *			cl_hostname;	/* hostname of server */
@@ -197,5 +198,7 @@
 #define NFS_CAP_MTIME		(1U << 13)
 #define NFS_CAP_POSIX_LOCK	(1U << 14)
 #define NFS_CAP_UIDGID_NOMAP	(1U << 15)
+#define NFS_CAP_STATEID_NFSV41	(1U << 16)
+#define NFS_CAP_ATOMIC_OPEN_V1	(1U << 17)
 
 #endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 4b993d3..766c5bc 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -14,9 +14,6 @@
 #define NFS_DEF_FILE_IO_SIZE	(4096U)
 #define NFS_MIN_FILE_IO_SIZE	(1024U)
 
-/* Forward declaration for NFS v3 */
-struct nfs4_secinfo_flavors;
-
 struct nfs4_string {
 	unsigned int len;
 	char *data;
@@ -349,6 +346,7 @@
 	const u32 *		bitmask;
 	const u32 *		open_bitmap;
 	__u32			claim;
+	enum createmode4	createmode;
 };
 
 struct nfs_openres {
@@ -486,6 +484,7 @@
 	struct nfs_fh *		fh;
 	struct nfs_open_context *context;
 	struct nfs_lock_context *lock_context;
+	nfs4_stateid		stateid;
 	__u64			offset;
 	__u32			count;
 	unsigned int		pgbase;
@@ -507,6 +506,7 @@
 	struct nfs_fh *		fh;
 	struct nfs_open_context *context;
 	struct nfs_lock_context *lock_context;
+	nfs4_stateid		stateid;
 	__u64			offset;
 	__u32			count;
 	enum nfs3_stable_how	stable;
@@ -1050,25 +1050,14 @@
 	struct nfs4_fs_locations       *fs_locations;
 };
 
-struct nfs4_secinfo_oid {
-	unsigned int len;
-	char data[GSS_OID_MAX_LEN];
-};
-
-struct nfs4_secinfo_gss {
-	struct nfs4_secinfo_oid sec_oid4;
-	unsigned int qop4;
-	unsigned int service;
-};
-
-struct nfs4_secinfo_flavor {
-	unsigned int 		flavor;
-	struct nfs4_secinfo_gss	gss;
+struct nfs4_secinfo4 {
+	u32			flavor;
+	struct rpcsec_gss_info	flavor_info;
 };
 
 struct nfs4_secinfo_flavors {
-	unsigned int num_flavors;
-	struct nfs4_secinfo_flavor flavors[0];
+	unsigned int		num_flavors;
+	struct nfs4_secinfo4	flavors[0];
 };
 
 struct nfs4_secinfo_arg {
diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/clk-lpss.h
new file mode 100644
index 0000000..528e73c
--- /dev/null
+++ b/include/linux/platform_data/clk-lpss.h
@@ -0,0 +1,18 @@
+/*
+ * Intel Low Power Subsystem clocks.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CLK_LPSS_H
+#define __CLK_LPSS_H
+
+extern int lpt_clk_init(void);
+
+#endif /* __CLK_LPSS_H */
diff --git a/include/linux/pm2301_charger.h b/include/linux/pm2301_charger.h
index fc3f026..85c16de 100644
--- a/include/linux/pm2301_charger.h
+++ b/include/linux/pm2301_charger.h
@@ -48,7 +48,7 @@
 	size_t num_supplicants;
 	int i2c_bus;
 	const char *label;
-	int irq_number;
+	int gpio_irq_number;
 	unsigned int lpn_gpio;
 	int irq_type;
 };
diff --git a/include/linux/power/ab8500.h b/include/linux/power/ab8500.h
new file mode 100644
index 0000000..cdbb6c2
--- /dev/null
+++ b/include/linux/power/ab8500.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) ST-Ericsson 2013
+ * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
+ * License terms: GNU General Public License v2
+ */
+
+#ifndef PWR_AB8500_H
+#define PWR_AB8500_H
+
+extern const struct abx500_res_to_temp ab8500_temp_tbl_a_thermistor[];
+extern const int ab8500_temp_tbl_a_size;
+
+extern const struct abx500_res_to_temp ab8500_temp_tbl_b_thermistor[];
+extern const int ab8500_temp_tbl_b_size;
+
+#endif /* PWR_AB8500_H */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 002a99f..3828cef 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -171,6 +171,12 @@
 	char **supplied_to;
 	size_t num_supplicants;
 
+	char **supplied_from;
+	size_t num_supplies;
+#ifdef CONFIG_OF
+	struct device_node *of_node;
+#endif
+
 	int (*get_property)(struct power_supply *psy,
 			    enum power_supply_property psp,
 			    union power_supply_propval *val);
diff --git a/include/linux/security.h b/include/linux/security.h
index 032c366..4686491 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1440,7 +1440,7 @@
 			     struct path *new_path);
 	int (*sb_set_mnt_opts) (struct super_block *sb,
 				struct security_mnt_opts *opts);
-	void (*sb_clone_mnt_opts) (const struct super_block *oldsb,
+	int (*sb_clone_mnt_opts) (const struct super_block *oldsb,
 				   struct super_block *newsb);
 	int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts);
 
@@ -1726,7 +1726,7 @@
 int security_sb_umount(struct vfsmount *mnt, int flags);
 int security_sb_pivotroot(struct path *old_path, struct path *new_path);
 int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts);
-void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+int security_sb_clone_mnt_opts(const struct super_block *oldsb,
 				struct super_block *newsb);
 int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
 
@@ -2016,9 +2016,11 @@
 	return 0;
 }
 
-static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb,
 					      struct super_block *newsb)
-{ }
+{
+	return 0;
+}
 
 static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
 {
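
With sb_clone_mnt_opts() now returning int, callers are expected to check and
propagate the result; a hedged sketch of the calling pattern (the label is
illustrative):

	error = security_sb_clone_mnt_opts(old_sb, new_sb);
	if (error)
		goto out_err;	/* hypothetical error path */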
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 58fda1c..0dd00f4 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -22,6 +22,8 @@
 /* size of the nodename buffer */
 #define UNX_MAXNODENAME	32
 
+struct rpcsec_gss_info;
+
 /* Work around the lack of a VFS credential */
 struct auth_cred {
 	kuid_t	uid;
@@ -103,6 +105,9 @@
 	int			(*pipes_create)(struct rpc_auth *);
 	void			(*pipes_destroy)(struct rpc_auth *);
 	int			(*list_pseudoflavors)(rpc_authflavor_t *, int);
+	rpc_authflavor_t	(*info2flavor)(struct rpcsec_gss_info *);
+	int			(*flavor2info)(rpc_authflavor_t,
+						struct rpcsec_gss_info *);
 };
 
 struct rpc_credops {
@@ -137,6 +142,10 @@
 int			rpcauth_unregister(const struct rpc_authops *);
 struct rpc_auth *	rpcauth_create(rpc_authflavor_t, struct rpc_clnt *);
 void			rpcauth_release(struct rpc_auth *);
+rpc_authflavor_t	rpcauth_get_pseudoflavor(rpc_authflavor_t,
+				struct rpcsec_gss_info *);
+int			rpcauth_get_gssinfo(rpc_authflavor_t,
+				struct rpcsec_gss_info *);
 int			rpcauth_list_flavors(rpc_authflavor_t *, int);
 struct rpc_cred *	rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
 void			rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 2cf4ffa..e7d492c 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -124,6 +124,7 @@
 #define RPC_CLNT_CREATE_NOPING		(1UL << 4)
 #define RPC_CLNT_CREATE_DISCRTRY	(1UL << 5)
 #define RPC_CLNT_CREATE_QUIET		(1UL << 6)
+#define RPC_CLNT_CREATE_INFINITE_SLOTS	(1UL << 7)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
 struct rpc_clnt	*rpc_bind_new_program(struct rpc_clnt *,
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index a19e254..f32b7a4 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -25,10 +25,21 @@
 
 #define GSS_C_NO_BUFFER		((struct xdr_netobj) 0)
 #define GSS_C_NO_CONTEXT	((struct gss_ctx *) 0)
-#define GSS_C_NULL_OID		((struct xdr_netobj) 0)
+#define GSS_C_QOP_DEFAULT	(0)
 
 /*XXX  arbitrary length - is this set somewhere? */
 #define GSS_OID_MAX_LEN 32
+struct rpcsec_gss_oid {
+	unsigned int	len;
+	u8		data[GSS_OID_MAX_LEN];
+};
+
+/* From RFC 3530 */
+struct rpcsec_gss_info {
+	struct rpcsec_gss_oid	oid;
+	u32			qop;
+	u32			service;
+};
 
 /* gss-api prototypes; note that these are somewhat simplified versions of
  * the prototypes specified in RFC 2744. */
@@ -58,12 +69,14 @@
 u32 gss_delete_sec_context(
 		struct gss_ctx		**ctx_id);
 
-u32 gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 service);
+rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop,
+					u32 service);
 u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
 char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
 
 struct pf_desc {
 	u32	pseudoflavor;
+	u32	qop;
 	u32	service;
 	char	*name;
 	char	*auth_domain_name;
@@ -76,7 +89,7 @@
 struct gss_api_mech {
 	struct list_head	gm_list;
 	struct module		*gm_owner;
-	struct xdr_netobj	gm_oid;
+	struct rpcsec_gss_oid	gm_oid;
 	char			*gm_name;
 	const struct gss_api_ops *gm_ops;
 	/* pseudoflavors supported by this mechanism: */
@@ -117,9 +130,11 @@
 int gss_mech_register(struct gss_api_mech *);
 void gss_mech_unregister(struct gss_api_mech *);
 
-/* returns a mechanism descriptor given an OID, and increments the mechanism's
- * reference count. */
-struct gss_api_mech * gss_mech_get_by_OID(struct xdr_netobj *);
+/* Given a GSS security tuple, look up a pseudoflavor */
+rpc_authflavor_t gss_mech_info2flavor(struct rpcsec_gss_info *);
+
+/* Given a pseudoflavor, look up a GSS security tuple */
+int gss_mech_flavor2info(rpc_authflavor_t, struct rpcsec_gss_info *);
 
 /* Returns a reference to a mechanism, given a name like "krb5" etc. */
 struct gss_api_mech *gss_mech_get_by_name(const char *);
@@ -130,9 +145,6 @@
 /* Fill in an array with a list of supported pseudoflavors */
 int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int);
 
-/* Just increments the mechanism's reference count and returns its input: */
-struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
-
 /* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
  * corresponding call to gss_mech_put. */
 void gss_mech_put(struct gss_api_mech *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 30834be..ff53924 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -255,6 +255,8 @@
 }
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 
+#define XPRT_CREATE_INFINITE_SLOTS	(1U)
+
 struct xprt_create {
 	int			ident;		/* XPRT_TRANSPORT identifier */
 	struct net *		net;
@@ -263,6 +265,7 @@
 	size_t			addrlen;
 	const char		*servername;
 	struct svc_xprt		*bc_xprt;	/* NFSv4.1 backchannel */
+	unsigned int		flags;
 };
 
 struct xprt_class {
@@ -279,6 +282,7 @@
 struct rpc_xprt		*xprt_create_transport(struct xprt_create *args);
 void			xprt_connect(struct rpc_task *task);
 void			xprt_reserve(struct rpc_task *task);
+void			xprt_retry_reserve(struct rpc_task *task);
 int			xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -334,6 +338,7 @@
 #define XPRT_CLOSING		(6)
 #define XPRT_CONNECTION_ABORT	(7)
 #define XPRT_CONNECTION_CLOSE	(8)
+#define XPRT_CONGESTED		(9)
 
 static inline void xprt_set_connected(struct rpc_xprt *xprt)
 {
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index 7581874..ea7168a 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -115,9 +115,6 @@
 void vexpress_sysreg_early_init(void __iomem *base);
 void vexpress_sysreg_of_early_init(void);
 
-void vexpress_power_off(void);
-void vexpress_restart(char str, const char *cmd);
-
 /* Clocks */
 
 struct clk *vexpress_osc_setup(struct device *dev);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 917741b..fe7f06c 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -63,6 +63,12 @@
 	ISCSI_UEVENT_PING		= UEVENT_BASE + 22,
 	ISCSI_UEVENT_GET_CHAP		= UEVENT_BASE + 23,
 	ISCSI_UEVENT_DELETE_CHAP	= UEVENT_BASE + 24,
+	ISCSI_UEVENT_SET_FLASHNODE_PARAMS	= UEVENT_BASE + 25,
+	ISCSI_UEVENT_NEW_FLASHNODE	= UEVENT_BASE + 26,
+	ISCSI_UEVENT_DEL_FLASHNODE	= UEVENT_BASE + 27,
+	ISCSI_UEVENT_LOGIN_FLASHNODE	= UEVENT_BASE + 28,
+	ISCSI_UEVENT_LOGOUT_FLASHNODE	= UEVENT_BASE + 29,
+	ISCSI_UEVENT_LOGOUT_FLASHNODE_SID	= UEVENT_BASE + 30,
 
 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
@@ -210,6 +216,31 @@
 		       uint32_t        host_no;
 		       uint16_t        chap_tbl_idx;
 		} delete_chap;
+		struct msg_set_flashnode_param {
+			uint32_t	host_no;
+			uint32_t	flashnode_idx;
+			uint32_t	count;
+		} set_flashnode;
+		struct msg_new_flashnode {
+			uint32_t	host_no;
+			uint32_t	len;
+		} new_flashnode;
+		struct msg_del_flashnode {
+			uint32_t	host_no;
+			uint32_t	flashnode_idx;
+		} del_flashnode;
+		struct msg_login_flashnode {
+			uint32_t	host_no;
+			uint32_t	flashnode_idx;
+		} login_flashnode;
+		struct msg_logout_flashnode {
+			uint32_t	host_no;
+			uint32_t	flashnode_idx;
+		} logout_flashnode;
+		struct msg_logout_flashnode_sid {
+			uint32_t	host_no;
+			uint32_t	sid;
+		} logout_flashnode_sid;
 	} u;
 	union {
 		/* messages k -> u */
@@ -267,6 +298,9 @@
 						   with each ping request */
 			uint32_t        data_size;
 		} ping_comp;
+		struct msg_new_flashnode_ret {
+			uint32_t	flashnode_idx;
+		} new_flashnode_ret;
 	} r;
 } __attribute__ ((aligned (sizeof(uint64_t))));
 
@@ -274,6 +308,7 @@
 	ISCSI_PARAM,		/* iscsi_param (session, conn, target, LU) */
 	ISCSI_HOST_PARAM,	/* iscsi_host_param */
 	ISCSI_NET_PARAM,	/* iscsi_net_param */
+	ISCSI_FLASHNODE_PARAM,	/* iscsi_flashnode_param */
 };
 
 struct iscsi_iface_param_info {
@@ -469,6 +504,88 @@
 	ISCSI_HOST_PARAM_MAX,
 };
 
+/* portal type */
+#define PORTAL_TYPE_IPV4	"ipv4"
+#define PORTAL_TYPE_IPV6	"ipv6"
+
+/* iSCSI Flash Target params */
+enum iscsi_flashnode_param {
+	ISCSI_FLASHNODE_IS_FW_ASSIGNED_IPV6,
+	ISCSI_FLASHNODE_PORTAL_TYPE,
+	ISCSI_FLASHNODE_AUTO_SND_TGT_DISABLE,
+	ISCSI_FLASHNODE_DISCOVERY_SESS,
+	ISCSI_FLASHNODE_ENTRY_EN,
+	ISCSI_FLASHNODE_HDR_DGST_EN,
+	ISCSI_FLASHNODE_DATA_DGST_EN,
+	ISCSI_FLASHNODE_IMM_DATA_EN,
+	ISCSI_FLASHNODE_INITIAL_R2T_EN,
+	ISCSI_FLASHNODE_DATASEQ_INORDER,
+	ISCSI_FLASHNODE_PDU_INORDER,
+	ISCSI_FLASHNODE_CHAP_AUTH_EN,
+	ISCSI_FLASHNODE_SNACK_REQ_EN,
+	ISCSI_FLASHNODE_DISCOVERY_LOGOUT_EN,
+	ISCSI_FLASHNODE_BIDI_CHAP_EN,
+	/* make authentication for discovery sessions optional */
+	ISCSI_FLASHNODE_DISCOVERY_AUTH_OPTIONAL,
+	ISCSI_FLASHNODE_ERL,
+	ISCSI_FLASHNODE_TCP_TIMESTAMP_STAT,
+	ISCSI_FLASHNODE_TCP_NAGLE_DISABLE,
+	ISCSI_FLASHNODE_TCP_WSF_DISABLE,
+	ISCSI_FLASHNODE_TCP_TIMER_SCALE,
+	ISCSI_FLASHNODE_TCP_TIMESTAMP_EN,
+	ISCSI_FLASHNODE_IP_FRAG_DISABLE,
+	ISCSI_FLASHNODE_MAX_RECV_DLENGTH,
+	ISCSI_FLASHNODE_MAX_XMIT_DLENGTH,
+	ISCSI_FLASHNODE_FIRST_BURST,
+	ISCSI_FLASHNODE_DEF_TIME2WAIT,
+	ISCSI_FLASHNODE_DEF_TIME2RETAIN,
+	ISCSI_FLASHNODE_MAX_R2T,
+	ISCSI_FLASHNODE_KEEPALIVE_TMO,
+	ISCSI_FLASHNODE_ISID,
+	ISCSI_FLASHNODE_TSID,
+	ISCSI_FLASHNODE_PORT,
+	ISCSI_FLASHNODE_MAX_BURST,
+	ISCSI_FLASHNODE_DEF_TASKMGMT_TMO,
+	ISCSI_FLASHNODE_IPADDR,
+	ISCSI_FLASHNODE_ALIAS,
+	ISCSI_FLASHNODE_REDIRECT_IPADDR,
+	ISCSI_FLASHNODE_MAX_SEGMENT_SIZE,
+	ISCSI_FLASHNODE_LOCAL_PORT,
+	ISCSI_FLASHNODE_IPV4_TOS,
+	ISCSI_FLASHNODE_IPV6_TC,
+	ISCSI_FLASHNODE_IPV6_FLOW_LABEL,
+	ISCSI_FLASHNODE_NAME,
+	ISCSI_FLASHNODE_TPGT,
+	ISCSI_FLASHNODE_LINK_LOCAL_IPV6,
+	ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX,
+	ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE,
+	ISCSI_FLASHNODE_TCP_XMIT_WSF,
+	ISCSI_FLASHNODE_TCP_RECV_WSF,
+	ISCSI_FLASHNODE_CHAP_IN_IDX,
+	ISCSI_FLASHNODE_CHAP_OUT_IDX,
+	ISCSI_FLASHNODE_USERNAME,
+	ISCSI_FLASHNODE_USERNAME_IN,
+	ISCSI_FLASHNODE_PASSWORD,
+	ISCSI_FLASHNODE_PASSWORD_IN,
+	ISCSI_FLASHNODE_STATSN,
+	ISCSI_FLASHNODE_EXP_STATSN,
+	ISCSI_FLASHNODE_IS_BOOT_TGT,
+
+	ISCSI_FLASHNODE_MAX,
+};
+
+struct iscsi_flashnode_param_info {
+	uint32_t len;		/* Actual length of the param */
+	uint16_t param;		/* iscsi param value */
+	uint8_t value[0];	/* length sized value follows */
+} __packed;
+
+enum iscsi_discovery_parent_type {
+	ISCSI_DISC_PARENT_UNKNOWN	= 0x1,
+	ISCSI_DISC_PARENT_SENDTGT	= 0x2,
+	ISCSI_DISC_PARENT_ISNS		= 0x3,
+};
+
 /* iSCSI port Speed */
 enum iscsi_port_speed {
 	ISCSI_PORT_SPEED_UNKNOWN	= 0x1,
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 6e33386..09c041e 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -427,6 +427,7 @@
  */
 extern void iscsi_pool_free(struct iscsi_pool *);
 extern int iscsi_pool_init(struct iscsi_pool *, int, void ***, int);
+extern int iscsi_switch_str_param(char **, char *);
 
 /*
  * inline functions to deal with padding.
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 53f0b36..4a58cca 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -39,6 +39,8 @@
 struct sockaddr;
 struct iscsi_iface;
 struct bsg_job;
+struct iscsi_bus_flash_session;
+struct iscsi_bus_flash_conn;
 
 /**
  * struct iscsi_transport - iSCSI Transport template
@@ -150,6 +152,19 @@
 	int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx,
 			 uint32_t *num_entries, char *buf);
 	int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx);
+	int (*get_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
+				    int param, char *buf);
+	int (*set_flashnode_param) (struct iscsi_bus_flash_session *fnode_sess,
+				    struct iscsi_bus_flash_conn *fnode_conn,
+				    void *data, int len);
+	int (*new_flashnode) (struct Scsi_Host *shost, const char *buf,
+			      int len);
+	int (*del_flashnode) (struct iscsi_bus_flash_session *fnode_sess);
+	int (*login_flashnode) (struct iscsi_bus_flash_session *fnode_sess,
+				struct iscsi_bus_flash_conn *fnode_conn);
+	int (*logout_flashnode) (struct iscsi_bus_flash_session *fnode_sess,
+				 struct iscsi_bus_flash_conn *fnode_conn);
+	int (*logout_flashnode_sid) (struct iscsi_cls_session *cls_sess);
 };
 
 /*
@@ -286,6 +301,112 @@
 #define iscsi_iface_to_shost(_iface) \
 	dev_to_shost(_iface->dev.parent)
 
+
+struct iscsi_bus_flash_conn {
+	struct list_head conn_list;	/* item in connlist */
+	void *dd_data;			/* LLD private data */
+	struct iscsi_transport *transport;
+	struct device dev;		/* sysfs transport/container device */
+	/* iscsi connection parameters */
+	uint32_t		exp_statsn;
+	uint32_t		statsn;
+	unsigned		max_recv_dlength; /* initiator_max_recv_dsl */
+	unsigned		max_xmit_dlength; /* target_max_recv_dsl */
+	unsigned		max_segment_size;
+	unsigned		tcp_xmit_wsf;
+	unsigned		tcp_recv_wsf;
+	int			hdrdgst_en;
+	int			datadgst_en;
+	int			port;
+	char			*ipaddress;
+	char			*link_local_ipv6_addr;
+	char			*redirect_ipaddr;
+	uint16_t		keepalive_timeout;
+	uint16_t		local_port;
+	uint8_t			snack_req_en;
+	/* tcp timestamp negotiation status */
+	uint8_t			tcp_timestamp_stat;
+	uint8_t			tcp_nagle_disable;
+	/* tcp window scale factor */
+	uint8_t			tcp_wsf_disable;
+	uint8_t			tcp_timer_scale;
+	uint8_t			tcp_timestamp_en;
+	uint8_t			ipv4_tos;
+	uint8_t			ipv6_traffic_class;
+	uint8_t			ipv6_flow_label;
+	uint8_t			fragment_disable;
+	/* Link local IPv6 address is assigned by firmware or driver */
+	uint8_t			is_fw_assigned_ipv6;
+};
+
+#define iscsi_dev_to_flash_conn(_dev) \
+	container_of(_dev, struct iscsi_bus_flash_conn, dev)
+
+#define iscsi_flash_conn_to_flash_session(_conn) \
+	iscsi_dev_to_flash_session(_conn->dev.parent)
+
+#define ISID_SIZE 6
+
+struct iscsi_bus_flash_session {
+	struct list_head sess_list;		/* item in session_list */
+	struct iscsi_transport *transport;
+	unsigned int target_id;
+	int flash_state;	/* persistent or non-persistent */
+	void *dd_data;				/* LLD private data */
+	struct device dev;	/* sysfs transport/container device */
+	/* iscsi session parameters */
+	unsigned		first_burst;
+	unsigned		max_burst;
+	unsigned short		max_r2t;
+	int			default_taskmgmt_timeout;
+	int			initial_r2t_en;
+	int			imm_data_en;
+	int			time2wait;
+	int			time2retain;
+	int			pdu_inorder_en;
+	int			dataseq_inorder_en;
+	int			erl;
+	int			tpgt;
+	char			*username;
+	char			*username_in;
+	char			*password;
+	char			*password_in;
+	char			*targetname;
+	char			*targetalias;
+	char			*portal_type;
+	uint16_t		tsid;
+	uint16_t		chap_in_idx;
+	uint16_t		chap_out_idx;
+	/* index of iSCSI discovery session if the entry is
+	 * discovered by iSCSI discovery session
+	 */
+	uint16_t		discovery_parent_idx;
+	/* indicates if discovery was done through iSNS discovery service
+	 * or through sendTarget */
+	uint16_t		discovery_parent_type;
+	/* Firmware auto sendtarget discovery disable */
+	uint8_t			auto_snd_tgt_disable;
+	uint8_t			discovery_sess;
+	/* indicates if this flashnode entry is enabled or disabled */
+	uint8_t			entry_state;
+	uint8_t			chap_auth_en;
+	/* enables firmware to auto logout the discovery session on discovery
+	 * completion
+	 */
+	uint8_t			discovery_logout_en;
+	uint8_t			bidi_chap_en;
+	/* makes authentication for discovery session optional */
+	uint8_t			discovery_auth_optional;
+	uint8_t			isid[ISID_SIZE];
+	uint8_t			is_boot_target;
+};
+
+#define iscsi_dev_to_flash_session(_dev) \
+	container_of(_dev, struct iscsi_bus_flash_session, dev)
+
+#define iscsi_flash_session_to_shost(_session) \
+	dev_to_shost(_session->dev.parent)
+
 /*
  * session and connection functions that can be used by HW iSCSI LLDs
  */
@@ -330,4 +451,34 @@
 extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
 extern int iscsi_is_session_dev(const struct device *dev);
 
+extern char *iscsi_get_discovery_parent_name(int parent_type);
+extern struct device *
+iscsi_find_flashnode(struct Scsi_Host *shost, void *data,
+		     int (*fn)(struct device *dev, void *data));
+
+extern struct iscsi_bus_flash_session *
+iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index,
+			    struct iscsi_transport *transport, int dd_size);
+
+extern struct iscsi_bus_flash_conn *
+iscsi_create_flashnode_conn(struct Scsi_Host *shost,
+			    struct iscsi_bus_flash_session *fnode_sess,
+			    struct iscsi_transport *transport, int dd_size);
+
+extern void
+iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session *fnode_sess);
+
+extern void iscsi_destroy_all_flashnode(struct Scsi_Host *shost);
+extern int iscsi_flashnode_bus_match(struct device *dev,
+				     struct device_driver *drv);
+extern int iscsi_is_flashnode_conn_dev(struct device *dev, void *data);
+
+extern struct device *
+iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data,
+			  int (*fn)(struct device *dev, void *data));
+
+extern struct device *
+iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess,
+			  void *data,
+			  int (*fn)(struct device *dev, void *data));
 #endif
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
new file mode 100644
index 0000000..23a87d0
--- /dev/null
+++ b/include/target/iscsi/iscsi_transport.h
@@ -0,0 +1,83 @@
+#include <linux/module.h>
+#include <linux/list.h>
+#include "../../../drivers/target/iscsi/iscsi_target_core.h"
+
+struct iscsit_transport {
+#define ISCSIT_TRANSPORT_NAME	16
+	char name[ISCSIT_TRANSPORT_NAME];
+	int transport_type;
+	struct module *owner;
+	struct list_head t_node;
+	int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
+	int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
+	void (*iscsit_free_np)(struct iscsi_np *);
+	void (*iscsit_free_conn)(struct iscsi_conn *);
+	struct iscsi_cmd *(*iscsit_alloc_cmd)(struct iscsi_conn *, gfp_t);
+	int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
+	int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
+	int (*iscsit_immediate_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
+	int (*iscsit_response_queue)(struct iscsi_conn *, struct iscsi_cmd *, int);
+	int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
+	int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
+	int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
+};
+
+/*
+ * From iscsi_target_transport.c
+ */
+
+extern int iscsit_register_transport(struct iscsit_transport *);
+extern void iscsit_unregister_transport(struct iscsit_transport *);
+extern struct iscsit_transport *iscsit_get_transport(int);
+extern void iscsit_put_transport(struct iscsit_transport *);
+
+/*
+ * From iscsi_target.c
+ */
+extern int iscsit_add_reject_from_cmd(u8, int, int, unsigned char *,
+				struct iscsi_cmd *);
+extern int iscsit_setup_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+				unsigned char *);
+extern void iscsit_set_unsoliticed_dataout(struct iscsi_cmd *);
+extern int iscsit_process_scsi_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+				struct iscsi_scsi_req *);
+extern int iscsit_check_dataout_hdr(struct iscsi_conn *, unsigned char *,
+				struct iscsi_cmd **);
+extern int iscsit_check_dataout_payload(struct iscsi_cmd *, struct iscsi_data *,
+				bool);
+extern int iscsit_handle_nop_out(struct iscsi_conn *, struct iscsi_cmd *,
+				unsigned char *);
+extern int iscsit_handle_logout_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+				unsigned char *);
+extern int iscsit_handle_task_mgt_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+				unsigned char *);
+extern void iscsit_build_rsp_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+				bool, struct iscsi_scsi_rsp *);
+extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+				struct iscsi_nopin *, bool);
+extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+				struct iscsi_tm_rsp *);
+extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
+				struct iscsi_reject *);
+extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
+				struct iscsi_logout_rsp *);
+extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+/*
+ * From iscsi_target_device.c
+ */
+extern void iscsit_increment_maxcmdsn(struct iscsi_cmd *, struct iscsi_session *);
+/*
+ * From iscsi_target_erl1.c
+ */
+extern void iscsit_stop_dataout_timer(struct iscsi_cmd *);
+
+/*
+ * From iscsi_target_tmr.c
+ */
+extern int iscsit_tmr_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+
+/*
+ * From iscsi_target_util.c
+ */
+extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, gfp_t);
+extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *, __be32);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index b128c20..ffa2696 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -60,6 +60,10 @@
 u32	sbc_get_device_rev(struct se_device *dev);
 u32	sbc_get_device_type(struct se_device *dev);
 sector_t	sbc_get_write_same_sectors(struct se_cmd *cmd);
+sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
+	sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
+				      sector_t lba, sector_t nolb),
+	void *priv);
 
 void	transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int	transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index aaa1ee6..ba3471b 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -120,7 +120,7 @@
 int	transport_check_aborted_status(struct se_cmd *, int);
 int	transport_send_check_condition_and_sense(struct se_cmd *,
 		sense_reason_t, int);
-
+int	target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
 int	target_put_sess_cmd(struct se_session *, struct se_cmd *);
 void	target_sess_cmd_list_set_waiting(struct se_session *);
 void	target_wait_for_sess_cmds(struct se_session *, int);
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 873e086..249df37 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -11,6 +11,7 @@
 #define DEBUGFS_MAGIC          0x64626720
 #define SECURITYFS_MAGIC	0x73636673
 #define SELINUX_MAGIC		0xf97cff8c
+#define SMACK_MAGIC		0x43415d53	/* "SMAC" */
 #define RAMFS_MAGIC		0x858458f6	/* some random number */
 #define TMPFS_MAGIC		0x01021994
 #define HUGETLBFS_MAGIC 	0x958458f6	/* some random number */
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index d4feda0..bef86d1 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -76,8 +76,20 @@
 
 bool valid_state(suspend_state_t state)
 {
-	if (state == PM_SUSPEND_FREEZE)
-		return true;
+	if (state == PM_SUSPEND_FREEZE) {
+#ifdef CONFIG_PM_DEBUG
+		if (pm_test_level != TEST_NONE &&
+		    pm_test_level != TEST_FREEZER &&
+		    pm_test_level != TEST_DEVICES &&
+		    pm_test_level != TEST_PLATFORM) {
+			printk(KERN_WARNING "Unsupported pm_test mode for "
+					"freeze state, please choose "
+					"none/freezer/devices/platform.\n");
+			return false;
+		}
+#endif
+		return true;
+	}
 	/*
 	 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
 	 * support and need to be valid to the lowlevel
@@ -184,6 +196,9 @@
 			goto Platform_wake;
 	}
 
+	if (suspend_test(TEST_PLATFORM))
+		goto Platform_wake;
+
 	/*
 	 * PM_SUSPEND_FREEZE equals
 	 * frozen processes + suspended devices + idle processors.
@@ -195,9 +210,6 @@
 		goto Platform_wake;
 	}
 
-	if (suspend_test(TEST_PLATFORM))
-		goto Platform_wake;
-
 	error = disable_nonboot_cpus();
 	if (error || suspend_test(TEST_CPUS))
 		goto Enable_cpus;
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 5af44b5..b7a1004 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -160,6 +160,8 @@
 		case BPF_S_ALU_AND_X:
 		case BPF_S_ALU_OR_K:
 		case BPF_S_ALU_OR_X:
+		case BPF_S_ALU_XOR_K:
+		case BPF_S_ALU_XOR_X:
 		case BPF_S_ALU_LSH_K:
 		case BPF_S_ALU_LSH_X:
 		case BPF_S_ALU_RSH_K:
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0d62fd7..e149c64 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -534,14 +534,21 @@
 
 static noinline_for_stack
 char *symbol_string(char *buf, char *end, void *ptr,
-		    struct printf_spec spec, char ext)
+		    struct printf_spec spec, const char *fmt)
 {
-	unsigned long value = (unsigned long) ptr;
+	unsigned long value;
 #ifdef CONFIG_KALLSYMS
 	char sym[KSYM_SYMBOL_LEN];
-	if (ext == 'B')
+#endif
+
+	if (fmt[1] == 'R')
+		ptr = __builtin_extract_return_addr(ptr);
+	value = (unsigned long)ptr;
+
+#ifdef CONFIG_KALLSYMS
+	if (*fmt == 'B')
 		sprint_backtrace(sym, value);
-	else if (ext != 'f' && ext != 's')
+	else if (*fmt != 'f' && *fmt != 's')
 		sprint_symbol(sym, value);
 	else
 		sprint_symbol_no_offset(sym, value);
@@ -987,6 +994,7 @@
  * - 'f' For simple symbolic function names without offset
  * - 'S' For symbolic direct pointers with offset
  * - 's' For symbolic direct pointers without offset
+ * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
  * - 'B' For backtraced symbolic direct pointers with offset
  * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
  * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
@@ -1060,7 +1068,7 @@
 	case 'S':
 	case 's':
 	case 'B':
-		return symbol_string(buf, end, ptr, spec, *fmt);
+		return symbol_string(buf, end, ptr, spec, fmt);
 	case 'R':
 	case 'r':
 		return resource_string(buf, end, ptr, spec, fmt);
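
A short illustrative use of the new 'R' suffix documented above, which passes
the pointer through __builtin_extract_return_addr() before symbol lookup:

	/* print the caller as symbol+offset, normalizing the raw return address */
	printk(KERN_DEBUG "called from %pSR\n", __builtin_return_address(0));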
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 516fe2c..241b54f 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -3,6 +3,7 @@
 
 config SUNRPC_GSS
 	tristate
+	select OID_REGISTRY
 
 config SUNRPC_BACKCHANNEL
 	bool
@@ -24,7 +25,6 @@
 config SUNRPC_SWAP
 	bool
 	depends on SUNRPC
-	select NETVM
 
 config RPCSEC_GSS_KRB5
 	tristate "Secure RPC: Kerberos V mechanism"
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index f529404..ed2fdd2 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -82,7 +82,7 @@
 
 static u32
 pseudoflavor_to_flavor(u32 flavor) {
-	if (flavor >= RPC_AUTH_MAXFLAVOR)
+	if (flavor > RPC_AUTH_MAXFLAVOR)
 		return RPC_AUTH_GSS;
 	return flavor;
 }
@@ -124,6 +124,79 @@
 EXPORT_SYMBOL_GPL(rpcauth_unregister);
 
 /**
+ * rpcauth_get_pseudoflavor - check if security flavor is supported
+ * @flavor: a security flavor
+ * @info: a GSS mech OID, quality of protection, and service value
+ *
+ * Verifies that an appropriate kernel module is available or already loaded.
+ * Returns an equivalent pseudoflavor, or RPC_AUTH_MAXFLAVOR if "flavor" is
+ * not supported locally.
+ */
+rpc_authflavor_t
+rpcauth_get_pseudoflavor(rpc_authflavor_t flavor, struct rpcsec_gss_info *info)
+{
+	const struct rpc_authops *ops;
+	rpc_authflavor_t pseudoflavor;
+
+	ops = auth_flavors[flavor];
+	if (ops == NULL)
+		request_module("rpc-auth-%u", flavor);
+	spin_lock(&rpc_authflavor_lock);
+	ops = auth_flavors[flavor];
+	if (ops == NULL || !try_module_get(ops->owner)) {
+		spin_unlock(&rpc_authflavor_lock);
+		return RPC_AUTH_MAXFLAVOR;
+	}
+	spin_unlock(&rpc_authflavor_lock);
+
+	pseudoflavor = flavor;
+	if (ops->info2flavor != NULL)
+		pseudoflavor = ops->info2flavor(info);
+
+	module_put(ops->owner);
+	return pseudoflavor;
+}
+EXPORT_SYMBOL_GPL(rpcauth_get_pseudoflavor);
+
+/**
+ * rpcauth_get_gssinfo - find GSS tuple matching a GSS pseudoflavor
+ * @pseudoflavor: GSS pseudoflavor to match
+ * @info: rpcsec_gss_info structure to fill in
+ *
+ * Returns zero and fills in "info" if pseudoflavor matches a
+ * supported mechanism.
+ */
+int
+rpcauth_get_gssinfo(rpc_authflavor_t pseudoflavor, struct rpcsec_gss_info *info)
+{
+	rpc_authflavor_t flavor = pseudoflavor_to_flavor(pseudoflavor);
+	const struct rpc_authops *ops;
+	int result;
+
+	if (flavor >= RPC_AUTH_MAXFLAVOR)
+		return -EINVAL;
+
+	ops = auth_flavors[flavor];
+	if (ops == NULL)
+		request_module("rpc-auth-%u", flavor);
+	spin_lock(&rpc_authflavor_lock);
+	ops = auth_flavors[flavor];
+	if (ops == NULL || !try_module_get(ops->owner)) {
+		spin_unlock(&rpc_authflavor_lock);
+		return -ENOENT;
+	}
+	spin_unlock(&rpc_authflavor_lock);
+
+	result = -ENOENT;
+	if (ops->flavor2info != NULL)
+		result = ops->flavor2info(pseudoflavor, info);
+
+	module_put(ops->owner);
+	return result;
+}
+EXPORT_SYMBOL_GPL(rpcauth_get_gssinfo);
+
+/**
  * rpcauth_list_flavors - discover registered flavors and pseudoflavors
  * @array: array to fill in
  * @size: size of "array"
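
A hedged sketch of how a consumer might use the two new helpers together;
the function name and header choices are assumptions, and the error handling
simply follows the return values documented above.

#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/msg_prot.h>

/* Illustrative round trip: pseudoflavor -> GSS tuple -> pseudoflavor. */
static rpc_authflavor_t demo_flavor_roundtrip(void)
{
	struct rpcsec_gss_info info;

	if (rpcauth_get_gssinfo(RPC_AUTH_GSS_KRB5I, &info) != 0)
		return RPC_AUTH_MAXFLAVOR;	/* e.g. -ENOENT: mech not available */

	/* Expected to come back as RPC_AUTH_GSS_KRB5I once rpcsec_gss is loaded. */
	return rpcauth_get_pseudoflavor(RPC_AUTH_GSS, &info);
}
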
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 5257d29..51415b0 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1641,6 +1641,8 @@
 	.pipes_create	= gss_pipes_dentries_create,
 	.pipes_destroy	= gss_pipes_dentries_destroy,
 	.list_pseudoflavors = gss_mech_list_pseudoflavors,
+	.info2flavor	= gss_mech_info2flavor,
+	.flavor2info	= gss_mech_flavor2info,
 };
 
 static const struct rpc_credops gss_credops = {
@@ -1733,6 +1735,7 @@
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
+MODULE_ALIAS("rpc-auth-6");
 MODULE_LICENSE("GPL");
 module_param_named(expired_cred_retry_delay,
 		   gss_expired_cred_retry_delay,
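
For reference, the new alias ties into the request_module() calls added in
net/sunrpc/auth.c above: RPC_AUTH_GSS is flavor number 6, so the generic auth
layer can now autoload this module.  A small sketch (the wrapper function is
made up for illustration):

#include <linux/kmod.h>
#include <linux/sunrpc/msg_prot.h>

/* The modprobe request below resolves via MODULE_ALIAS("rpc-auth-6"). */
static void demo_autoload_rpcsec_gss(void)
{
	request_module("rpc-auth-%u", RPC_AUTH_GSS);
}
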
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index d3611f1..33255ff 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -729,16 +729,19 @@
 static struct pf_desc gss_kerberos_pfs[] = {
 	[0] = {
 		.pseudoflavor = RPC_AUTH_GSS_KRB5,
+		.qop = GSS_C_QOP_DEFAULT,
 		.service = RPC_GSS_SVC_NONE,
 		.name = "krb5",
 	},
 	[1] = {
 		.pseudoflavor = RPC_AUTH_GSS_KRB5I,
+		.qop = GSS_C_QOP_DEFAULT,
 		.service = RPC_GSS_SVC_INTEGRITY,
 		.name = "krb5i",
 	},
 	[2] = {
 		.pseudoflavor = RPC_AUTH_GSS_KRB5P,
+		.qop = GSS_C_QOP_DEFAULT,
 		.service = RPC_GSS_SVC_PRIVACY,
 		.name = "krb5p",
 	},
@@ -750,11 +753,12 @@
 MODULE_ALIAS("rpc-auth-gss-390003");
 MODULE_ALIAS("rpc-auth-gss-390004");
 MODULE_ALIAS("rpc-auth-gss-390005");
+MODULE_ALIAS("rpc-auth-gss-1.2.840.113554.1.2.2");
 
 static struct gss_api_mech gss_kerberos_mech = {
 	.gm_name	= "krb5",
 	.gm_owner	= THIS_MODULE,
-	.gm_oid		= {9, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"},
+	.gm_oid		= { 9, "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" },
 	.gm_ops		= &gss_kerberos_ops,
 	.gm_pf_num	= ARRAY_SIZE(gss_kerberos_pfs),
 	.gm_pfs		= gss_kerberos_pfs,
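
A hedged illustration of what the new qop fields buy: gss_svc_to_pseudoflavor()
can now match on the (qop, service) pair recorded in each pf_desc.  The wrapper
below is not part of the patch and assumes the declarations are visible via
<linux/sunrpc/gss_api.h>.

#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/msg_prot.h>

static rpc_authflavor_t demo_krb5i_flavor(struct gss_api_mech *krb5_mech)
{
	/* Expected to return RPC_AUTH_GSS_KRB5I given the table above. */
	return gss_svc_to_pseudoflavor(krb5_mech, GSS_C_QOP_DEFAULT,
				       RPC_GSS_SVC_INTEGRITY);
}
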
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index f0f4eee..79881d6 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -36,6 +36,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/oid_registry.h>
 #include <linux/sunrpc/msg_prot.h>
 #include <linux/sunrpc/gss_asn1.h>
 #include <linux/sunrpc/auth_gss.h>
@@ -102,8 +103,13 @@
 	return status;
 }
 
-int
-gss_mech_register(struct gss_api_mech *gm)
+/**
+ * gss_mech_register - register a GSS mechanism
+ * @gm: GSS mechanism handle
+ *
+ * Returns zero if successful, or a negative errno.
+ */
+int gss_mech_register(struct gss_api_mech *gm)
 {
 	int status;
 
@@ -116,11 +122,14 @@
 	dprintk("RPC:       registered gss mechanism %s\n", gm->gm_name);
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(gss_mech_register);
 
-void
-gss_mech_unregister(struct gss_api_mech *gm)
+/**
+ * gss_mech_unregister - release a GSS mechanism
+ * @gm: GSS mechanism handle
+ *
+ */
+void gss_mech_unregister(struct gss_api_mech *gm)
 {
 	spin_lock(&registered_mechs_lock);
 	list_del(&gm->gm_list);
@@ -128,18 +137,14 @@
 	dprintk("RPC:       unregistered gss mechanism %s\n", gm->gm_name);
 	gss_mech_free(gm);
 }
-
 EXPORT_SYMBOL_GPL(gss_mech_unregister);
 
-struct gss_api_mech *
-gss_mech_get(struct gss_api_mech *gm)
+static struct gss_api_mech *gss_mech_get(struct gss_api_mech *gm)
 {
 	__module_get(gm->gm_owner);
 	return gm;
 }
 
-EXPORT_SYMBOL_GPL(gss_mech_get);
-
 static struct gss_api_mech *
 _gss_mech_get_by_name(const char *name)
 {
@@ -169,12 +174,16 @@
 	}
 	return gm;
 }
-EXPORT_SYMBOL_GPL(gss_mech_get_by_name);
 
-struct gss_api_mech *
-gss_mech_get_by_OID(struct xdr_netobj *obj)
+static struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj)
 {
 	struct gss_api_mech	*pos, *gm = NULL;
+	char buf[32];
+
+	if (sprint_oid(obj->data, obj->len, buf, sizeof(buf)) < 0)
+		return NULL;
+	dprintk("RPC:       %s(%s)\n", __func__, buf);
+	request_module("rpc-auth-gss-%s", buf);
 
 	spin_lock(&registered_mechs_lock);
 	list_for_each_entry(pos, &registered_mechs, gm_list) {
@@ -188,11 +197,8 @@
 	}
 	spin_unlock(&registered_mechs_lock);
 	return gm;
-
 }
 
-EXPORT_SYMBOL_GPL(gss_mech_get_by_OID);
-
 static inline int
 mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
 {
@@ -237,8 +243,6 @@
 	return gm;
 }
 
-EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
-
 /**
  * gss_mech_list_pseudoflavors - Discover registered GSS pseudoflavors
  * @array: array to fill in
@@ -268,19 +272,82 @@
 	return i;
 }
 
-u32
-gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
+/**
+ * gss_svc_to_pseudoflavor - map a GSS service number to a pseudoflavor
+ * @gm: GSS mechanism handle
+ * @qop: GSS quality-of-protection value
+ * @service: GSS service value
+ *
+ * Returns a matching security flavor, or RPC_AUTH_MAXFLAVOR if none is found.
+ */
+rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 qop,
+					 u32 service)
 {
 	int i;
 
 	for (i = 0; i < gm->gm_pf_num; i++) {
-		if (gm->gm_pfs[i].service == service) {
+		if (gm->gm_pfs[i].qop == qop &&
+		    gm->gm_pfs[i].service == service) {
 			return gm->gm_pfs[i].pseudoflavor;
 		}
 	}
-	return RPC_AUTH_MAXFLAVOR; /* illegal value */
+	return RPC_AUTH_MAXFLAVOR;
 }
-EXPORT_SYMBOL_GPL(gss_svc_to_pseudoflavor);
+
+/**
+ * gss_mech_info2flavor - look up a pseudoflavor given a GSS tuple
+ * @info: a GSS mech OID, quality of protection, and service value
+ *
+ * Returns a matching pseudoflavor, or RPC_AUTH_MAXFLAVOR if the tuple is
+ * not supported.
+ */
+rpc_authflavor_t gss_mech_info2flavor(struct rpcsec_gss_info *info)
+{
+	rpc_authflavor_t pseudoflavor;
+	struct gss_api_mech *gm;
+
+	gm = gss_mech_get_by_OID(&info->oid);
+	if (gm == NULL)
+		return RPC_AUTH_MAXFLAVOR;
+
+	pseudoflavor = gss_svc_to_pseudoflavor(gm, info->qop, info->service);
+
+	gss_mech_put(gm);
+	return pseudoflavor;
+}
+
+/**
+ * gss_mech_flavor2info - look up a GSS tuple for a given pseudoflavor
+ * @pseudoflavor: GSS pseudoflavor to match
+ * @info: rpcsec_gss_info structure to fill in
+ *
+ * Returns zero and fills in "info" if pseudoflavor matches a
+ * supported mechanism.  Otherwise a negative errno is returned.
+ */
+int gss_mech_flavor2info(rpc_authflavor_t pseudoflavor,
+			 struct rpcsec_gss_info *info)
+{
+	struct gss_api_mech *gm;
+	int i;
+
+	gm = gss_mech_get_by_pseudoflavor(pseudoflavor);
+	if (gm == NULL)
+		return -ENOENT;
+
+	for (i = 0; i < gm->gm_pf_num; i++) {
+		if (gm->gm_pfs[i].pseudoflavor == pseudoflavor) {
+			memcpy(info->oid.data, gm->gm_oid.data, gm->gm_oid.len);
+			info->oid.len = gm->gm_oid.len;
+			info->qop = gm->gm_pfs[i].qop;
+			info->service = gm->gm_pfs[i].service;
+			gss_mech_put(gm);
+			return 0;
+		}
+	}
+
+	gss_mech_put(gm);
+	return -ENOENT;
+}
 
 u32
 gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
@@ -294,8 +361,6 @@
 	return 0;
 }
 
-EXPORT_SYMBOL_GPL(gss_pseudoflavor_to_service);
-
 char *
 gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
 {
@@ -308,8 +373,6 @@
 	return NULL;
 }
 
-EXPORT_SYMBOL_GPL(gss_service_to_auth_domain_name);
-
 void
 gss_mech_put(struct gss_api_mech * gm)
 {
@@ -317,8 +380,6 @@
 		module_put(gm->gm_owner);
 }
 
-EXPORT_SYMBOL_GPL(gss_mech_put);
-
 /* The mech could probably be determined from the token instead, but it's just
  * as easy for now to pass it in. */
 int
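
The OID_REGISTRY dependency added in the sunrpc Kconfig hunk above exists for
sprint_oid(), which gss_mech_get_by_OID() now uses to build the
"rpc-auth-gss-<dotted oid>" module name.  A small sketch, not in the patch
(the demo function is illustrative):

#include <linux/oid_registry.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Render the krb5 mech OID the way gss_mech_get_by_OID() does. */
static void demo_print_krb5_oid(void)
{
	static const u8 krb5_oid[] =
		"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02";
	char buf[32];

	/* sprint_oid() returns the string length, or a negative errno. */
	if (sprint_oid(krb5_oid, sizeof(krb5_oid) - 1, buf, sizeof(buf)) > 0)
		pr_debug("krb5 OID: %s\n", buf);	/* "1.2.840.113554.1.2.2" */
}
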
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 5ead605..c3ba570 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1220,7 +1220,9 @@
 		svcdata->rsci = rsci;
 		cache_get(&rsci->h);
 		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
-					rsci->mechctx->mech_type, gc->gc_svc);
+					rsci->mechctx->mech_type,
+					GSS_C_QOP_DEFAULT,
+					gc->gc_svc);
 		ret = SVC_OK;
 		goto out;
 	}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d5f35f1..d259fa9 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -411,6 +411,8 @@
 	};
 	char servername[48];
 
+	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
+		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
 	/*
 	 * If the caller chooses not to specify a hostname, whip
 	 * up a string representation of the passed-in address.
@@ -1301,6 +1303,8 @@
 	xprt_reserve(task);
 }
 
+static void call_retry_reserve(struct rpc_task *task);
+
 /*
  * 1b.	Grok the result of xprt_reserve()
  */
@@ -1342,7 +1346,7 @@
 	case -ENOMEM:
 		rpc_delay(task, HZ >> 2);
 	case -EAGAIN:	/* woken up; retry */
-		task->tk_action = call_reserve;
+		task->tk_action = call_retry_reserve;
 		return;
 	case -EIO:	/* probably a shutdown */
 		break;
@@ -1355,6 +1359,19 @@
 }
 
 /*
+ * 1c.	Retry reserving an RPC call slot
+ */
+static void
+call_retry_reserve(struct rpc_task *task)
+{
+	dprint_status(task);
+
+	task->tk_status  = 0;
+	task->tk_action  = call_reserveresult;
+	xprt_retry_reserve(task);
+}
+
+/*
  * 2.	Bind and/or refresh the credentials
  */
 static void
@@ -1639,22 +1656,26 @@
 
 	dprint_status(task);
 
-	task->tk_status = 0;
-	if (status >= 0 || status == -EAGAIN) {
-		clnt->cl_stats->netreconn++;
-		task->tk_action = call_transmit;
-		return;
-	}
-
 	trace_rpc_connect_status(task, status);
 	switch (status) {
 		/* if soft mounted, test if we've timed out */
 	case -ETIMEDOUT:
 		task->tk_action = call_timeout;
-		break;
-	default:
-		rpc_exit(task, -EIO);
+		return;
+	case -ECONNREFUSED:
+	case -ECONNRESET:
+	case -ENETUNREACH:
+		if (RPC_IS_SOFTCONN(task))
+			break;
+		/* retry with existing socket, after a delay */
+	case 0:
+	case -EAGAIN:
+		task->tk_status = 0;
+		clnt->cl_stats->netreconn++;
+		task->tk_action = call_transmit;
+		return;
 	}
+	rpc_exit(task, status);
 }
 
 /*
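
A hedged example of a caller opting into the dynamic slot table; only the
RPC_CLNT_CREATE_INFINITE_SLOTS flag comes from this patch, the wrapper
function and its arguments are illustrative.

#include <linux/sunrpc/clnt.h>

/*
 * Ask for a dynamically sized slot table so the transport grows request
 * slots on demand instead of parking tasks on the backlog queue.
 */
static struct rpc_clnt *demo_create_client(struct rpc_create_args *args)
{
	args->flags |= RPC_CLNT_CREATE_INFINITE_SLOTS;
	return rpc_create(args);
}
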
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index b7478d5..745fca3 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -948,6 +948,34 @@
 	spin_unlock_bh(&xprt->transport_lock);
 }
 
+static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	set_bit(XPRT_CONGESTED, &xprt->state);
+	rpc_sleep_on(&xprt->backlog, task, NULL);
+}
+
+static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+{
+	if (rpc_wake_up_next(&xprt->backlog) == NULL)
+		clear_bit(XPRT_CONGESTED, &xprt->state);
+}
+
+static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	bool ret = false;
+
+	if (!test_bit(XPRT_CONGESTED, &xprt->state))
+		goto out;
+	spin_lock(&xprt->reserve_lock);
+	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
+		rpc_sleep_on(&xprt->backlog, task, NULL);
+		ret = true;
+	}
+	spin_unlock(&xprt->reserve_lock);
+out:
+	return ret;
+}
+
 static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
 {
 	struct rpc_rqst *req = ERR_PTR(-EAGAIN);
@@ -992,7 +1020,7 @@
 		task->tk_status = -ENOMEM;
 		break;
 	case -EAGAIN:
-		rpc_sleep_on(&xprt->backlog, task, NULL);
+		xprt_add_backlog(xprt, task);
 		dprintk("RPC:       waiting for request slot\n");
 	default:
 		task->tk_status = -EAGAIN;
@@ -1028,7 +1056,7 @@
 		memset(req, 0, sizeof(*req));	/* mark unused */
 		list_add(&req->rq_list, &xprt->free);
 	}
-	rpc_wake_up_next(&xprt->backlog);
+	xprt_wake_up_backlog(xprt);
 	spin_unlock(&xprt->reserve_lock);
 }
 
@@ -1092,7 +1120,8 @@
  * xprt_reserve - allocate an RPC request slot
  * @task: RPC task requesting a slot allocation
  *
- * If no more slots are available, place the task on the transport's
+ * If the transport is marked as being congested, or if no more
+ * slots are available, place the task on the transport's
  * backlog queue.
  */
 void xprt_reserve(struct rpc_task *task)
@@ -1107,6 +1136,32 @@
 	task->tk_status = -EAGAIN;
 	rcu_read_lock();
 	xprt = rcu_dereference(task->tk_client->cl_xprt);
+	if (!xprt_throttle_congested(xprt, task))
+		xprt->ops->alloc_slot(xprt, task);
+	rcu_read_unlock();
+}
+
+/**
+ * xprt_retry_reserve - allocate an RPC request slot
+ * @task: RPC task requesting a slot allocation
+ *
+ * If no more slots are available, place the task on the transport's
+ * backlog queue.
+ * Note that the only difference with xprt_reserve is that we now
+ * ignore the value of the XPRT_CONGESTED flag.
+ */
+void xprt_retry_reserve(struct rpc_task *task)
+{
+	struct rpc_xprt	*xprt;
+
+	task->tk_status = 0;
+	if (task->tk_rqstp != NULL)
+		return;
+
+	task->tk_timeout = 0;
+	task->tk_status = -EAGAIN;
+	rcu_read_lock();
+	xprt = rcu_dereference(task->tk_client->cl_xprt);
 	xprt->ops->alloc_slot(xprt, task);
 	rcu_read_unlock();
 }
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3d02130..9c28258 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2207,10 +2207,6 @@
 		 */
 		xs_tcp_force_close(xprt);
 		break;
-	case -ECONNREFUSED:
-	case -ECONNRESET:
-	case -ENETUNREACH:
-		/* retry with existing socket, after a delay */
 	case 0:
 	case -EINPROGRESS:
 	case -EALREADY:
@@ -2221,6 +2217,10 @@
 		/* Happens, for instance, if the user specified a link
 		 * local IPv6 address without a scope-id.
 		 */
+	case -ECONNREFUSED:
+	case -ECONNRESET:
+	case -ENETUNREACH:
+		/* retry with existing socket, after a delay */
 		goto out;
 	}
 out_eagain:
@@ -2767,9 +2767,13 @@
 	struct rpc_xprt *xprt;
 	struct sock_xprt *transport;
 	struct rpc_xprt *ret;
+	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
+
+	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
+		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
 
 	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
-			xprt_max_tcp_slot_table_entries);
+			max_slot_table_size);
 	if (IS_ERR(xprt))
 		return xprt;
 	transport = container_of(xprt, struct sock_xprt, xprt);
diff --git a/security/capability.c b/security/capability.c
index 6783c3e..1728d4e 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -98,9 +98,10 @@
 	return 0;
 }
 
-static void cap_sb_clone_mnt_opts(const struct super_block *oldsb,
+static int cap_sb_clone_mnt_opts(const struct super_block *oldsb,
 				  struct super_block *newsb)
 {
+	return 0;
 }
 
 static int cap_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 3b3b7e6..6c491a6 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -189,11 +189,9 @@
 	if (rc != 0)
 		goto out_digsig;
 
-	if (function != BPRM_CHECK)
-		pathname = ima_d_path(&file->f_path, &pathbuf);
-
+	pathname = !filename ? ima_d_path(&file->f_path, &pathbuf) : filename;
 	if (!pathname)
-		pathname = filename;
+		pathname = (const char *)file->f_dentry->d_name.name;
 
 	if (action & IMA_MEASURE)
 		ima_store_measurement(iint, file, pathname);
@@ -226,8 +224,7 @@
 int ima_file_mmap(struct file *file, unsigned long prot)
 {
 	if (file && (prot & PROT_EXEC))
-		return process_measurement(file, file->f_dentry->d_name.name,
-					   MAY_EXEC, MMAP_CHECK);
+		return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK);
 	return 0;
 }
 
@@ -265,7 +262,7 @@
 int ima_file_check(struct file *file, int mask)
 {
 	ima_rdwr_violation_check(file);
-	return process_measurement(file, file->f_dentry->d_name.name,
+	return process_measurement(file, NULL,
 				 mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
 				 FILE_CHECK);
 }
@@ -290,8 +287,7 @@
 #endif
 		return 0;	/* We rely on module signature checking */
 	}
-	return process_measurement(file, file->f_dentry->d_name.name,
-				   MAY_EXEC, MODULE_CHECK);
+	return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK);
 }
 
 static int __init init_ima(void)
diff --git a/security/security.c b/security/security.c
index 03f248b..a3dce87 100644
--- a/security/security.c
+++ b/security/security.c
@@ -299,10 +299,10 @@
 }
 EXPORT_SYMBOL(security_sb_set_mnt_opts);
 
-void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+int security_sb_clone_mnt_opts(const struct super_block *oldsb,
 				struct super_block *newsb)
 {
-	security_ops->sb_clone_mnt_opts(oldsb, newsb);
+	return security_ops->sb_clone_mnt_opts(oldsb, newsb);
 }
 EXPORT_SYMBOL(security_sb_clone_mnt_opts);
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 7171a95..feb2f42 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -751,7 +751,37 @@
 	goto out;
 }
 
-static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
+static int selinux_cmp_sb_context(const struct super_block *oldsb,
+				    const struct super_block *newsb)
+{
+	struct superblock_security_struct *old = oldsb->s_security;
+	struct superblock_security_struct *new = newsb->s_security;
+	char oldflags = old->flags & SE_MNTMASK;
+	char newflags = new->flags & SE_MNTMASK;
+
+	if (oldflags != newflags)
+		goto mismatch;
+	if ((oldflags & FSCONTEXT_MNT) && old->sid != new->sid)
+		goto mismatch;
+	if ((oldflags & CONTEXT_MNT) && old->mntpoint_sid != new->mntpoint_sid)
+		goto mismatch;
+	if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid)
+		goto mismatch;
+	if (oldflags & ROOTCONTEXT_MNT) {
+		struct inode_security_struct *oldroot = oldsb->s_root->d_inode->i_security;
+		struct inode_security_struct *newroot = newsb->s_root->d_inode->i_security;
+		if (oldroot->sid != newroot->sid)
+			goto mismatch;
+	}
+	return 0;
+mismatch:
+	printk(KERN_WARNING "SELinux: mount invalid.  Same superblock, "
+			    "different security settings for (dev %s, "
+			    "type %s)\n", newsb->s_id, newsb->s_type->name);
+	return -EBUSY;
+}
+
+static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
 					struct super_block *newsb)
 {
 	const struct superblock_security_struct *oldsbsec = oldsb->s_security;
@@ -766,14 +796,14 @@
 	 * mount options.  thus we can safely deal with this superblock later
 	 */
 	if (!ss_initialized)
-		return;
+		return 0;
 
 	/* how can we clone if the old one wasn't set up?? */
 	BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
 
-	/* if fs is reusing a sb, just let its options stand... */
+	/* if fs is reusing a sb, make sure that the contexts match */
 	if (newsbsec->flags & SE_SBINITIALIZED)
-		return;
+		return selinux_cmp_sb_context(oldsb, newsb);
 
 	mutex_lock(&newsbsec->lock);
 
@@ -806,6 +836,7 @@
 
 	sb_finish_set_opts(newsb);
 	mutex_unlock(&newsbsec->lock);
+	return 0;
 }
 
 static int selinux_parse_opts_str(char *options,
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 99b3612..8ad30955 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -149,11 +149,6 @@
 #define SMACK_CIPSO_SOCKET	1
 
 /*
- * smackfs magic number
- */
-#define SMACK_MAGIC	0x43415d53 /* "SMAC" */
-
-/*
  * CIPSO defaults.
  */
 #define SMACK_CIPSO_DOI_DEFAULT		3	/* Historical */
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index db14689..2e397a8 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -252,6 +252,8 @@
 		string[i++] = 'x';
 	if (access & MAY_APPEND)
 		string[i++] = 'a';
+	if (access & MAY_TRANSMUTE)
+		string[i++] = 't';
 	string[i] = '\0';
 }
 /**
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index fa64740..d52c780 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -654,7 +654,7 @@
 		/*
 		 * You also need write access to the containing directory
 		 */
-		smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
+		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
 		smk_ad_setfield_u_fs_inode(&ad, dir);
 		rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
 	}
@@ -685,7 +685,7 @@
 		/*
 		 * You also need write access to the containing directory
 		 */
-		smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
+		smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
 		smk_ad_setfield_u_fs_inode(&ad, dir);
 		rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
 	}
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 76a5dca..53a08b8 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -26,6 +26,7 @@
 #include <linux/seq_file.h>
 #include <linux/ctype.h>
 #include <linux/audit.h>
+#include <linux/magic.h>
 #include "smack.h"
 
 /*
@@ -50,12 +51,12 @@
 	SMK_ACCESS2	= 16,	/* make an access check with long labels */
 	SMK_CIPSO2	= 17,	/* load long label -> CIPSO mapping */
 	SMK_REVOKE_SUBJ	= 18,	/* set rules with subject label to '-' */
+	SMK_CHANGE_RULE	= 19,	/* change or add rules (long labels) */
 };
 
 /*
  * List locks
  */
-static DEFINE_MUTEX(smack_list_lock);
 static DEFINE_MUTEX(smack_cipso_lock);
 static DEFINE_MUTEX(smack_ambient_lock);
 static DEFINE_MUTEX(smk_netlbladdr_lock);
@@ -110,6 +111,13 @@
 
 LIST_HEAD(smack_rule_list);
 
+struct smack_parsed_rule {
+	char			*smk_subject;
+	char			*smk_object;
+	int			smk_access1;
+	int			smk_access2;
+};
+
 static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
 
 const char *smack_cipso_option = SMACK_CIPSO_OPTION;
@@ -167,25 +175,28 @@
 #define SMK_NETLBLADDRMIN	9
 
 /**
- * smk_set_access - add a rule to the rule list
- * @srp: the new rule to add
+ * smk_set_access - add a rule to the rule list or replace an old rule
+ * @srp: the rule to add or replace
  * @rule_list: the list of rules
  * @rule_lock: the rule list lock
+ * @global: if non-zero, indicates a global rule
  *
  * Looks through the current subject/object/access list for
  * the subject/object pair and replaces the access that was
  * there. If the pair isn't found add it with the specified
  * access.
  *
- * Returns 1 if a rule was found to exist already, 0 if it is new
  * Returns 0 if nothing goes wrong or -ENOMEM if it fails
  * during the allocation of the new pair to add.
  */
-static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
-				struct mutex *rule_lock)
+static int smk_set_access(struct smack_parsed_rule *srp,
+				struct list_head *rule_list,
+				struct mutex *rule_lock, int global)
 {
 	struct smack_rule *sp;
+	struct smack_master_list *smlp;
 	int found = 0;
+	int rc = 0;
 
 	mutex_lock(rule_lock);
 
@@ -197,23 +208,89 @@
 		if (sp->smk_object == srp->smk_object &&
 		    sp->smk_subject == srp->smk_subject) {
 			found = 1;
-			sp->smk_access = srp->smk_access;
+			sp->smk_access |= srp->smk_access1;
+			sp->smk_access &= ~srp->smk_access2;
 			break;
 		}
 	}
-	if (found == 0)
-		list_add_rcu(&srp->list, rule_list);
 
+	if (found == 0) {
+		sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+		if (sp == NULL) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		sp->smk_subject = srp->smk_subject;
+		sp->smk_object = srp->smk_object;
+		sp->smk_access = srp->smk_access1 & ~srp->smk_access2;
+
+		list_add_rcu(&sp->list, rule_list);
+		/*
+		 * If this is a global as opposed to self and a new rule
+		 * it needs to get added for reporting.
+		 */
+		if (global) {
+			smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
+			if (smlp != NULL) {
+				smlp->smk_rule = sp;
+				list_add_rcu(&smlp->list, &smack_rule_list);
+			} else
+				rc = -ENOMEM;
+		}
+	}
+
+out:
 	mutex_unlock(rule_lock);
+	return rc;
+}
 
-	return found;
+/**
+ * smk_perm_from_str - parse smack accesses from a text string
+ * @string: a text string that contains a Smack accesses code
+ *
+ * Returns an integer with respective bits set for specified accesses.
+ */
+static int smk_perm_from_str(const char *string)
+{
+	int perm = 0;
+	const char *cp;
+
+	for (cp = string; ; cp++)
+		switch (*cp) {
+		case '-':
+			break;
+		case 'r':
+		case 'R':
+			perm |= MAY_READ;
+			break;
+		case 'w':
+		case 'W':
+			perm |= MAY_WRITE;
+			break;
+		case 'x':
+		case 'X':
+			perm |= MAY_EXEC;
+			break;
+		case 'a':
+		case 'A':
+			perm |= MAY_APPEND;
+			break;
+		case 't':
+		case 'T':
+			perm |= MAY_TRANSMUTE;
+			break;
+		default:
+			return perm;
+		}
 }
 
 /**
  * smk_fill_rule - Fill Smack rule from strings
  * @subject: subject label string
  * @object: object label string
- * @access: access string
+ * @access1: access string
+ * @access2: string with permissions to be removed
  * @rule: Smack rule
  * @import: if non-zero, import labels
  * @len: label length limit
@@ -221,8 +298,9 @@
  * Returns 0 on success, -1 on failure
  */
 static int smk_fill_rule(const char *subject, const char *object,
-				const char *access, struct smack_rule *rule,
-				int import, int len)
+				const char *access1, const char *access2,
+				struct smack_parsed_rule *rule, int import,
+				int len)
 {
 	const char *cp;
 	struct smack_known *skp;
@@ -255,36 +333,11 @@
 		rule->smk_object = skp->smk_known;
 	}
 
-	rule->smk_access = 0;
-
-	for (cp = access; *cp != '\0'; cp++) {
-		switch (*cp) {
-		case '-':
-			break;
-		case 'r':
-		case 'R':
-			rule->smk_access |= MAY_READ;
-			break;
-		case 'w':
-		case 'W':
-			rule->smk_access |= MAY_WRITE;
-			break;
-		case 'x':
-		case 'X':
-			rule->smk_access |= MAY_EXEC;
-			break;
-		case 'a':
-		case 'A':
-			rule->smk_access |= MAY_APPEND;
-			break;
-		case 't':
-		case 'T':
-			rule->smk_access |= MAY_TRANSMUTE;
-			break;
-		default:
-			return 0;
-		}
-	}
+	rule->smk_access1 = smk_perm_from_str(access1);
+	if (access2)
+		rule->smk_access2 = smk_perm_from_str(access2);
+	else
+		rule->smk_access2 = ~rule->smk_access1;
 
 	return 0;
 }
@@ -297,30 +350,33 @@
  *
  * Returns 0 on success, -1 on errors.
  */
-static int smk_parse_rule(const char *data, struct smack_rule *rule, int import)
+static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule,
+				int import)
 {
 	int rc;
 
 	rc = smk_fill_rule(data, data + SMK_LABELLEN,
-			   data + SMK_LABELLEN + SMK_LABELLEN, rule, import,
-			   SMK_LABELLEN);
+			   data + SMK_LABELLEN + SMK_LABELLEN, NULL, rule,
+			   import, SMK_LABELLEN);
 	return rc;
 }
 
 /**
  * smk_parse_long_rule - parse Smack rule from rule string
  * @data: string to be parsed, null terminated
- * @rule: Smack rule
+ * @rule: Will be filled with Smack parsed rule
  * @import: if non-zero, import labels
+ * @change: if non-zero, data is from /smack/change-rule
  *
  * Returns 0 on success, -1 on failure
  */
-static int smk_parse_long_rule(const char *data, struct smack_rule *rule,
-				int import)
+static int smk_parse_long_rule(const char *data, struct smack_parsed_rule *rule,
+				int import, int change)
 {
 	char *subject;
 	char *object;
-	char *access;
+	char *access1;
+	char *access2;
 	int datalen;
 	int rc = -1;
 
@@ -334,14 +390,27 @@
 	object = kzalloc(datalen, GFP_KERNEL);
 	if (object == NULL)
 		goto free_out_s;
-	access = kzalloc(datalen, GFP_KERNEL);
-	if (access == NULL)
+	access1 = kzalloc(datalen, GFP_KERNEL);
+	if (access1 == NULL)
 		goto free_out_o;
+	access2 = kzalloc(datalen, GFP_KERNEL);
+	if (access2 == NULL)
+		goto free_out_a;
 
-	if (sscanf(data, "%s %s %s", subject, object, access) == 3)
-		rc = smk_fill_rule(subject, object, access, rule, import, 0);
+	if (change) {
+		if (sscanf(data, "%s %s %s %s",
+			subject, object, access1, access2) == 4)
+			rc = smk_fill_rule(subject, object, access1, access2,
+				rule, import, 0);
+	} else {
+		if (sscanf(data, "%s %s %s", subject, object, access1) == 3)
+			rc = smk_fill_rule(subject, object, access1, NULL,
+				rule, import, 0);
+	}
 
-	kfree(access);
+	kfree(access2);
+free_out_a:
+	kfree(access1);
 free_out_o:
 	kfree(object);
 free_out_s:
@@ -351,6 +420,7 @@
 
 #define SMK_FIXED24_FMT	0	/* Fixed 24byte label format */
 #define SMK_LONG_FMT	1	/* Variable long label format */
+#define SMK_CHANGE_FMT	2	/* Rule modification format */
 /**
  * smk_write_rules_list - write() for any /smack rule file
  * @file: file pointer, not actually used
@@ -359,22 +429,24 @@
  * @ppos: where to start - must be 0
  * @rule_list: the list of rules to write to
  * @rule_lock: lock for the rule list
- * @format: /smack/load or /smack/load2 format.
+ * @format: /smack/load or /smack/load2 or /smack/change-rule format.
  *
  * Get one smack access rule from above.
  * The format for SMK_LONG_FMT is:
  *	"subject<whitespace>object<whitespace>access[<whitespace>...]"
  * The format for SMK_FIXED24_FMT is exactly:
  *	"subject                 object                  rwxat"
+ * The format for SMK_CHANGE_FMT is:
+ *	"subject<whitespace>object<whitespace>
+ *	 acc_enable<whitespace>acc_disable[<whitespace>...]"
  */
 static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
 					size_t count, loff_t *ppos,
 					struct list_head *rule_list,
 					struct mutex *rule_lock, int format)
 {
-	struct smack_master_list *smlp;
 	struct smack_known *skp;
-	struct smack_rule *rule;
+	struct smack_parsed_rule *rule;
 	char *data;
 	int datalen;
 	int rc = -EINVAL;
@@ -417,7 +489,11 @@
 		 * Be sure the data string is terminated.
 		 */
 		data[count] = '\0';
-		if (smk_parse_long_rule(data, rule, 1))
+		if (smk_parse_long_rule(data, rule, 1, 0))
+			goto out_free_rule;
+	} else if (format == SMK_CHANGE_FMT) {
+		data[count] = '\0';
+		if (smk_parse_long_rule(data, rule, 1, 1))
 			goto out_free_rule;
 	} else {
 		/*
@@ -437,22 +513,9 @@
 		rule_lock = &skp->smk_rules_lock;
 	}
 
-	rc = count;
-	/*
-	 * If this is a global as opposed to self and a new rule
-	 * it needs to get added for reporting.
-	 * smk_set_access returns true if there was already a rule
-	 * for the subject/object pair, and false if it was new.
-	 */
-	if (!smk_set_access(rule, rule_list, rule_lock)) {
-		if (load) {
-			smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
-			if (smlp != NULL) {
-				smlp->smk_rule = rule;
-				list_add_rcu(&smlp->list, &smack_rule_list);
-			} else
-				rc = -ENOMEM;
-		}
+	rc = smk_set_access(rule, rule_list, rule_lock, load);
+	if (rc == 0) {
+		rc = count;
 		goto out;
 	}
 
@@ -1774,7 +1837,7 @@
 static ssize_t smk_user_access(struct file *file, const char __user *buf,
 				size_t count, loff_t *ppos, int format)
 {
-	struct smack_rule rule;
+	struct smack_parsed_rule rule;
 	char *data;
 	char *cod;
 	int res;
@@ -1796,14 +1859,14 @@
 			return -ENOMEM;
 		memcpy(cod, data, count);
 		cod[count] = '\0';
-		res = smk_parse_long_rule(cod, &rule, 0);
+		res = smk_parse_long_rule(cod, &rule, 0, 0);
 		kfree(cod);
 	}
 
 	if (res)
 		return -EINVAL;
 
-	res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access,
+	res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access1,
 			  NULL);
 	data[0] = res == 0 ? '1' : '0';
 	data[1] = '\0';
@@ -2035,10 +2098,8 @@
 	}
 
 	skp = smk_find_entry(cp);
-	if (skp == NULL) {
-		rc = -EINVAL;
+	if (skp == NULL)
 		goto free_out;
-	}
 
 	rule_list = &skp->smk_rules;
 	rule_lock = &skp->smk_rules_lock;
@@ -2077,6 +2138,33 @@
 }
 
 /**
+ * smk_write_change_rule - write() for /smack/change-rule
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_change_rule(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	/*
+	 * Must have privilege.
+	 */
+	if (!capable(CAP_MAC_ADMIN))
+		return -EPERM;
+
+	return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+				    SMK_CHANGE_FMT);
+}
+
+static const struct file_operations smk_change_rule_ops = {
+	.write		= smk_write_change_rule,
+	.read		= simple_transaction_read,
+	.release	= simple_transaction_release,
+	.llseek		= generic_file_llseek,
+};
+
+/**
  * smk_fill_super - fill the /smackfs superblock
  * @sb: the empty superblock
  * @data: unused
@@ -2125,6 +2213,8 @@
 		[SMK_REVOKE_SUBJ] = {
 			"revoke-subject", &smk_revoke_subj_ops,
 			S_IRUGO|S_IWUSR},
+		[SMK_CHANGE_RULE] = {
+			"change-rule", &smk_change_rule_ops, S_IRUGO|S_IWUSR},
 		/* last one */
 			{""}
 	};
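
From userspace, the new file takes "subject object access_enable
access_disable", as documented in the SMK_CHANGE_FMT comment above.  A
minimal sketch (the labels and the /smack mount point are illustrative;
requires CAP_MAC_ADMIN):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* For the existing Crackle->Pop rule: enable read/write, revoke execute. */
int main(void)
{
	static const char rule[] = "Crackle Pop rw x";
	int fd = open("/smack/change-rule", O_WRONLY);

	if (fd < 0) {
		perror("open /smack/change-rule");
		return 1;
	}
	if (write(fd, rule, strlen(rule)) < 0)
		perror("write");
	close(fd);
	return 0;
}
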
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index a2ee362..f0b756e 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -536,7 +536,7 @@
 };
 
 /* Lock for GC. */
-struct srcu_struct tomoyo_ss;
+DEFINE_SRCU(tomoyo_ss);
 
 /**
  * tomoyo_init - Register TOMOYO Linux as a LSM module.
@@ -550,8 +550,7 @@
 	if (!security_module_enable(&tomoyo_security_ops))
 		return 0;
 	/* register ourselves with the security framework */
-	if (register_security(&tomoyo_security_ops) ||
-	    init_srcu_struct(&tomoyo_ss))
+	if (register_security(&tomoyo_security_ops))
 		panic("Failure registering TOMOYO Linux");
 	printk(KERN_INFO "TOMOYO Linux initialized\n");
 	cred->security = &tomoyo_kernel_domain;